diff --git a/go.mod b/go.mod index a4a28106..057fb2d0 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/sirupsen/logrus v1.9.3 go.uber.org/multierr v1.11.0 golang.org/x/net v0.32.0 - google.golang.org/grpc v1.69.0 + google.golang.org/grpc v1.68.1 google.golang.org/protobuf v1.36.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c k8s.io/apimachinery v0.32.0 diff --git a/go.sum b/go.sum index 54904439..1094bf6d 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,6 @@ github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44 github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -111,16 +109,6 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -141,8 +129,8 @@ golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= -google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf 
v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ= google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 382ad694..3a2092f1 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,6 +73,17 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -95,6 +106,57 @@ func Get(name string) Builder { return nil } +// A SubConn represents a single connection to a gRPC backend service. +// +// Each SubConn contains a list of addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. +// +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. 
+ // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() +} + // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -362,6 +424,18 @@ type ExitIdler interface { ExitIdle() } +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -374,3 +448,22 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go index 7d66cb49..c5197894 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -18,18 +18,7 @@ // Package internal contains code internal to the pickfirst package. package internal -import ( - rand "math/rand/v2" - "time" -) +import "math/rand" -var ( - // RandShuffle pseudo-randomizes the order of addresses. - RandShuffle = rand.Shuffle - // TimeAfterFunc allows mocking the timer for testing connection delay - // related functionality. - TimeAfterFunc = func(d time.Duration, f func()) func() { - timer := time.AfterFunc(d, f) - return func() { timer.Stop() } - } -) +// RandShuffle pseudo-randomizes the order of addresses. 
+var RandShuffle = rand.Shuffle diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index ea889981..e069346a 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,7 +23,7 @@ import ( "encoding/json" "errors" "fmt" - rand "math/rand/v2" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 2fc0a71f..985b6edc 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,15 +29,11 @@ import ( "encoding/json" "errors" "fmt" - "net" - "net/netip" "sync" - "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" - expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -54,68 +50,26 @@ func init() { balancer.Register(pickfirstBuilder{}) } -// enableHealthListenerKeyType is a unique key type used in resolver attributes -// to indicate whether the health listener usage is enabled. -type enableHealthListenerKeyType struct{} - var ( logger = grpclog.Component("pick-first-leaf-lb") // Name is the name of the pick_first_leaf balancer. // It is changed to "pick_first" in init() if this balancer is to be // registered as the default pickfirst. - Name = "pick_first_leaf" - disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.disconnections", - Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_succeeded", - Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_failed", - Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "attempt", - Labels: []string{"grpc.target"}, - Default: false, - }) + Name = "pick_first_leaf" ) -const ( - // TODO: change to pick-first when this becomes the default pick_first policy. - logPrefix = "[pick-first-leaf-lb %p] " - // connectionDelayInterval is the time to wait for during the happy eyeballs - // pass before starting the next connection attempt. - connectionDelayInterval = 250 * time.Millisecond -) - -type ipAddrFamily int - -const ( - // ipAddrFamilyUnknown represents strings that can't be parsed as an IP - // address. - ipAddrFamilyUnknown ipAddrFamily = iota - ipAddrFamilyV4 - ipAddrFamilyV6 -) +// TODO: change to pick-first when this becomes the default pick_first policy. 
+const logPrefix = "[pick-first-leaf-lb %p] " type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{ - cc: cc, - target: bo.Target.String(), - metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder. - - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - cancelConnectionTimer: func() {}, + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -133,13 +87,6 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan return cfg, nil } -// EnableHealthListener updates the state to configure pickfirst for using a -// generic health listener. -func EnableHealthListener(state resolver.State) resolver.State { - state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) - return state -} - type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -157,19 +104,14 @@ type scData struct { subConn balancer.SubConn addr resolver.Address - rawConnectivityState connectivity.State - // The effective connectivity state based on raw connectivity, health state - // and after following sticky TransientFailure behaviour defined in A62. - effectiveState connectivity.State - lastErr error - connectionFailedInFirstPass bool + state connectivity.State + lastErr error } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { sd := &scData{ - rawConnectivityState: connectivity.Idle, - effectiveState: connectivity.Idle, - addr: addr, + state: connectivity.Idle, + addr: addr, } sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ StateListener: func(state balancer.SubConnState) { @@ -186,25 +128,19 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { type pickfirstBalancer struct { // The following fields are initialized at build time and read-only after // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn - target string - metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn // The mutex is used to ensure synchronization of updates triggered // from the idle picker and the already serialized resolver, // SubConn state updates. - mu sync.Mutex - // State reported to the channel based on SubConn states and resolver - // updates. + mu sync.Mutex state connectivity.State // scData for active subonns mapped by address. 
- subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int - cancelConnectionTimer func() - healthCheckingEnabled bool + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int } // ResolverError is called by the ClientConn when the name resolver produces @@ -230,7 +166,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { return } - b.updateBalancerState(balancer.State{ + b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) @@ -239,16 +175,15 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() - b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. + b.state = connectivity.TransientFailure b.closeSubConnsLocked() b.addressList.updateAddrs(nil) b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) @@ -271,6 +206,9 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8305 section 4." - A61 + // TODO: support the above language. newAddrs = append(newAddrs, endpoint.Addresses...) } } else { @@ -293,17 +231,16 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) - newAddrs = interleaveAddresses(newAddrs) - prevAddr := b.addressList.currentAddress() - prevSCData, found := b.subConns.Get(prevAddr) - prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready - b.addressList.updateAddrs(newAddrs) + // Since we have a new set of addresses, we are again at first pass. + b.firstPass = true // If the previous ready SubConn exists in new address list, // keep this connection and don't create new SubConns. - if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { + prevAddr := b.addressList.currentAddress() + prevAddrsCount := b.addressList.size() + b.addressList.updateAddrs(newAddrs) + if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { return nil } @@ -315,17 +252,18 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // we should still enter CONNECTING because the sticky TF behaviour // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported // due to connectivity failures. 
- if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { // Start connection attempt at first address. - b.forceUpdateConcludedStateLocked(balancer.State{ + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - b.startFirstPassLocked() + b.requestConnectionLocked() } else if b.state == connectivity.TransientFailure { // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. - b.startFirstPassLocked() + b.requestConnectionLocked() } return nil } @@ -340,7 +278,6 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() - b.cancelConnectionTimer() b.state = connectivity.Shutdown } @@ -350,19 +287,10 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() - if b.state == connectivity.Idle { - b.startFirstPassLocked() - } -} - -func (b *pickfirstBalancer) startFirstPassLocked() { - b.firstPass = true - b.numTF = 0 - // Reset the connection attempt record for existing SubConns. - for _, sd := range b.subConns.Values() { - sd.(*scData).connectionFailedInFirstPass = false + if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { + b.firstPass = true + b.requestConnectionLocked() } - b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { @@ -386,70 +314,6 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) -// as per RFC-8305 section 4. -// Whichever address family is first in the list is followed by an address of -// the other address family; that is, if the first address in the list is IPv6, -// then the first IPv4 address should be moved up in the list to be second in -// the list. It doesn't support configuring "First Address Family Count", i.e. -// there will always be a single member of the first address family at the -// beginning of the interleaved list. -// Addresses that are neither IPv4 nor IPv6 are treated as part of a third -// "unknown" family for interleaving. -// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 -func interleaveAddresses(addrs []resolver.Address) []resolver.Address { - familyAddrsMap := map[ipAddrFamily][]resolver.Address{} - interleavingOrder := []ipAddrFamily{} - for _, addr := range addrs { - family := addressFamily(addr.Addr) - if _, found := familyAddrsMap[family]; !found { - interleavingOrder = append(interleavingOrder, family) - } - familyAddrsMap[family] = append(familyAddrsMap[family], addr) - } - - interleavedAddrs := make([]resolver.Address, 0, len(addrs)) - - for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { - // Some IP types may have fewer addresses than others, so we look for - // the next type that has a remaining member to add to the interleaved - // list. - family := interleavingOrder[curFamilyIdx] - remainingMembers := familyAddrsMap[family] - if len(remainingMembers) > 0 { - interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) - familyAddrsMap[family] = remainingMembers[1:] - } - } - - return interleavedAddrs -} - -// addressFamily returns the ipAddrFamily after parsing the address string. 
-// If the address isn't of the format "ip-address:port", it returns -// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when -// using a resolver like passthrough where the address may be a hostname in -// some format that the dialer can resolve. -func addressFamily(address string) ipAddrFamily { - // Parse the IP after removing the port. - host, _, err := net.SplitHostPort(address) - if err != nil { - return ipAddrFamilyUnknown - } - ip, err := netip.ParseAddr(host) - if err != nil { - return ipAddrFamilyUnknown - } - switch { - case ip.Is4() || ip.Is4In6(): - return ipAddrFamilyV4 - case ip.Is6(): - return ipAddrFamilyV6 - default: - return ipAddrFamilyUnknown - } -} - // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. It does this by: // - closing subchannels: any existing subchannels associated with addresses @@ -478,7 +342,6 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { - b.cancelConnectionTimer() for _, v := range b.subConns.Values() { sd := v.(*scData) if sd.subConn != selected.subConn { @@ -519,89 +382,46 @@ func (b *pickfirstBalancer) requestConnectionLocked() { } scd := sd.(*scData) - switch scd.rawConnectivityState { + switch scd.state { case connectivity.Idle: scd.subConn.Connect() - b.scheduleNextConnectionLocked() - return case connectivity.TransientFailure: - // The SubConn is being re-used and failed during a previous pass - // over the addressList. It has not completed backoff yet. - // Mark it as having failed and try the next address. - scd.connectionFailedInFirstPass = true + // Try the next address. lastErr = scd.lastErr continue + case connectivity.Ready: + // Should never happen. + b.logger.Errorf("Requesting a connection even though we have a READY SubConn") + case connectivity.Shutdown: + // Should never happen. + b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") case connectivity.Connecting: - // Wait for the connection attempt to complete or the timer to fire - // before attempting the next address. - b.scheduleNextConnectionLocked() - return - default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) - return - + // Wait for the SubConn to report success or failure. } - } - - // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass if possible. - b.endFirstPassIfPossibleLocked(lastErr) -} - -func (b *pickfirstBalancer) scheduleNextConnectionLocked() { - b.cancelConnectionTimer() - if !b.addressList.hasNext() { return } - curAddr := b.addressList.currentAddress() - cancelled := false // Access to this is protected by the balancer's mutex. - closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { - b.mu.Lock() - defer b.mu.Unlock() - // If the scheduled task is cancelled while acquiring the mutex, return. - if cancelled { - return - } - if b.logger.V(2) { - b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) - } - if b.addressList.increment() { - b.requestConnectionLocked() - } - }) - // Access to the cancellation callback held by the balancer is guarded by - // the balancer's mutex, so it's safe to set the boolean from the callback. 
- b.cancelConnectionTimer = sync.OnceFunc(func() { - cancelled = true - closeFn() - }) + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass. + b.endFirstPassLocked(lastErr) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() - oldState := sd.rawConnectivityState - sd.rawConnectivityState = newState.ConnectivityState + oldState := sd.state + sd.state = newState.ConnectivityState // Previously relevant SubConns can still callback with state updates. // To prevent pickers from returning these obsolete SubConns, this logic // is included to check if the current list of active SubConns includes this // SubConn. - if !b.isActiveSCData(sd) { + if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { return } if newState.ConnectivityState == connectivity.Shutdown { - sd.effectiveState = connectivity.Shutdown return } - // Record a connection attempt when exiting CONNECTING. - if newState.ConnectivityState == connectivity.TransientFailure { - sd.connectionFailedInFirstPass = true - connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) - } - if newState.ConnectivityState == connectivity.Ready { - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) b.shutdownRemainingLocked(sd) if !b.addressList.seekTo(sd.addr) { // This should not fail as we should have only one SubConn after @@ -609,30 +429,10 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } - if !b.healthCheckingEnabled { - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) - } - - sd.effectiveState = connectivity.Ready - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - return - } - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) - } - // Send a CONNECTING update to take the SubConn out of sticky-TF if - // required. - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { - b.updateSubConnHealthState(sd, scs) + b.state = connectivity.Ready + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, }) return } @@ -643,24 +443,13 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // a transport is successfully created, but the connection fails // before the SubConn can send the notification for READY. We treat // this as a successful connection and transition to IDLE. - // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second - // part of the if condition below once the issue is fixed. 
- if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { // Once a transport fails, the balancer enters IDLE and starts from // the first address when the picker is used. b.shutdownRemainingLocked(sd) - sd.effectiveState = newState.ConnectivityState - // READY SubConn interspliced in between CONNECTING and IDLE, need to - // account for that. - if oldState == connectivity.Connecting { - // A known issue (https://github.com/grpc/grpc-go/issues/7862) - // causes a race that prevents the READY state change notification. - // This works around it. - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - } - disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.state = connectivity.Idle b.addressList.reset() - b.updateBalancerState(balancer.State{ + b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.Idle, Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, }) @@ -670,35 +459,32 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if b.firstPass { switch newState.ConnectivityState { case connectivity.Connecting: - // The effective state can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // The balancer can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in // TRANSIENT_FAILURE until it's READY. See A62. - if sd.effectiveState != connectivity.TransientFailure { - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ + // If the balancer is already in CONNECTING, no update is needed. + if b.state == connectivity.Idle { + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) } case connectivity.TransientFailure: sd.lastErr = newState.ConnectionError - sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. Happy Eyeballs will also - // cause out of order updates to arrive. - - if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - b.cancelConnectionTimer() - if b.addressList.increment() { - b.requestConnectionLocked() - return - } - } + // a pass over the previous address list. We ignore such updates. - // End the first pass if we've seen a TRANSIENT_FAILURE from all - // SubConns once. - b.endFirstPassIfPossibleLocked(newState.ConnectionError) + if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + return + } + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + // End of the first pass. 
+ b.endFirstPassLocked(newState.ConnectionError) } return } @@ -709,7 +495,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.numTF = (b.numTF + 1) % b.subConns.Len() sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { - b.updateBalancerState(balancer.State{ + b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: newState.ConnectionError}, }) @@ -723,95 +509,24 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } -// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the -// addresses are tried and their SubConns have reported a failure. -func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { - // An optimization to avoid iterating over the entire SubConn map. - if b.addressList.isValid() { - return - } - // Connect() has been called on all the SubConns. The first pass can be - // ended if all the SubConns have reported a failure. - for _, v := range b.subConns.Values() { - sd := v.(*scData) - if !sd.connectionFailedInFirstPass { - return - } - } +func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { b.firstPass = false - b.updateBalancerState(balancer.State{ + b.numTF = 0 + b.state = connectivity.TransientFailure + + b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. for _, v := range b.subConns.Values() { sd := v.(*scData) - if sd.rawConnectivityState == connectivity.Idle { + if sd.state == connectivity.Idle { sd.subConn.Connect() } } } -func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { - activeSD, found := b.subConns.Get(sd.addr) - return found && activeSD == sd -} - -func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes - // this SubConn. - if !b.isActiveSCData(sd) { - return - } - sd.effectiveState = state.ConnectivityState - switch state.ConnectivityState { - case connectivity.Ready: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - case connectivity.TransientFailure: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, - }) - case connectivity.Connecting: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - default: - b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) - } -} - -// updateBalancerState stores the state reported to the channel and calls -// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate -// updates to the channel. -func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { - // In case of TransientFailures allow the picker to be updated to update - // the connectivity error, in all other cases don't send duplicate state - // updates. 
- if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { - return - } - b.forceUpdateConcludedStateLocked(newState) -} - -// forceUpdateConcludedStateLocked stores the state reported to the channel and -// calls ClientConn.UpdateState(). -// A separate function is defined to force update the ClientConn state since the -// channel doesn't correctly assume that LB policies start in CONNECTING and -// relies on LB policy to send an initial CONNECTING update. -func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { - b.state = newState.ConnectivityState - b.cc.UpdateState(newState) -} - type picker struct { result balancer.PickResult err error @@ -868,6 +583,15 @@ func (al *addressList) currentAddress() resolver.Address { return al.addresses[al.idx] } +// first returns the first address in the list. If the list is empty, it returns +// an empty address instead. +func (al *addressList) first() resolver.Address { + if len(al.addresses) == 0 { + return resolver.Address{} + } + return al.addresses[0] +} + func (al *addressList) reset() { al.idx = 0 } @@ -890,16 +614,6 @@ func (al *addressList) seekTo(needle resolver.Address) bool { return false } -// hasNext returns whether incrementing the addressList will result in moving -// past the end of the list. If the list has already moved past the end, it -// returns false. -func (al *addressList) hasNext() bool { - if !al.isValid() { - return false - } - return al.idx+1 < len(al.addresses) -} - // equalAddressIgnoringBalAttributes returns true is a and b are considered // equal. This is different from the Equal method on the resolver.Address type // which considers all fields to determine equality. Here, we only consider diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 80a42d22..260255d3 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - rand "math/rand/v2" + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(rand.IntN(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go deleted file mode 100644 index ea27c4fa..00000000 --- a/vendor/google.golang.org/grpc/balancer/subconn.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package balancer - -import ( - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/resolver" -) - -// A SubConn represents a single connection to a gRPC backend service. -// -// All SubConns start in IDLE, and will not try to connect. To trigger a -// connection attempt, Balancers must call Connect. -// -// If the connection attempt fails, the SubConn will transition to -// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the -// connection attempt succeeds, it will transition to READY. -// -// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE. -// -// If a connection re-enters IDLE, Balancers must call Connect again to trigger -// a new connection attempt. -// -// Each SubConn contains a list of addresses. gRPC will try to connect to the -// addresses in sequence, and stop trying the remainder once the first -// connection is successful. However, this behavior is deprecated. SubConns -// should only use a single address. -// -// NOTICE: This interface is intended to be implemented by gRPC, or intercepted -// by custom load balancing poilices. Users should not need their own complete -// implementation of this interface -- they should always delegate to a SubConn -// returned by ClientConn.NewSubConn() by embedding it in their implementations. -// An embedded SubConn must never be nil, or runtime panics will occur. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully close, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which may be - // called when the Producer is no longer needed. Otherwise the producer - // will automatically be closed upon connection loss or subchannel close. - // Should only be called on a SubConn in state Ready. Otherwise the - // producer will be unable to create streams. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() - // RegisterHealthListener registers a health listener that receives health - // updates for a Ready SubConn. Only one health listener can be registered - // at a time. A health listener should be registered each time the SubConn's - // connectivity state changes to READY. Registering a health listener when - // the connectivity state is not READY may result in undefined behaviour. 
- // This method must not be called synchronously while handling an update - // from a previously registered health listener. - RegisterHealthListener(func(SubConnState)) - // EnforceSubConnEmbedding is included to force implementers to embed - // another implementation of this interface, allowing gRPC to add methods - // without breaking users. - internal.EnforceSubConnEmbedding -} - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Build also returns a close function that will be called when all - // references to the Producer have been given up for a SubConn, or when a - // connectivity state change occurs on the SubConn. The close function - // should always block until all asynchronous cleanup work is completed. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 905817b5..2a4f2878 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -189,7 +189,6 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, - healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil @@ -255,32 +254,12 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) producersMu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer - - // Access to healthData is protected by healthMu. - healthMu sync.Mutex - // healthData is stored as a pointer to detect when the health listener is - // dropped or updated. This is required as closures can't be compared for - // equality. 
- healthData *healthData -} - -// healthData holds data related to health state reporting. -type healthData struct { - // connectivityState stores the most recent connectivity state delivered - // to the LB policy. This is stored to avoid sending updates when the - // SubConn has already exited connectivity state READY. - connectivityState connectivity.State -} - -func newHealthData(s connectivity.State) *healthData { - return &healthData{connectivityState: s} } // updateState is invoked by grpc to push a subConn state update to the @@ -300,24 +279,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } - // Invalidate the health listener by updating the healthData. - acbw.healthMu.Lock() - // A race may occur if a health listener is registered soon after the - // connectivity state is set but before the stateListener is called. - // Two cases may arise: - // 1. The new state is not READY: RegisterHealthListener has checks to - // ensure no updates are sent when the connectivity state is not - // READY. - // 2. The new state is READY: This means that the old state wasn't Ready. - // The RegisterHealthListener API mentions that a health listener - // must not be registered when a SubConn is not ready to avoid such - // races. When this happens, the LB policy would get health updates - // on the old listener. When the LB policy registers a new listener - // on receiving the connectivity update, the health updates will be - // sent to the new health listener. - acbw.healthData = newHealthData(scs.ConnectivityState) - acbw.healthMu.Unlock() - acbw.stateListener(scs) }) } @@ -412,41 +373,3 @@ func (acbw *acBalancerWrapper) closeProducers() { delete(acbw.producers, pb) } } - -// RegisterHealthListener accepts a health listener from the LB policy. It sends -// updates to the health listener as long as the SubConn's connectivity state -// doesn't change and a new health listener is not registered. To invalidate -// the currently registered health listener, acbw updates the healthData. If a -// nil listener is registered, the active health listener is dropped. -func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { - acbw.healthMu.Lock() - defer acbw.healthMu.Unlock() - // listeners should not be registered when the connectivity state - // isn't Ready. This may happen when the balancer registers a listener - // after the connectivityState is updated, but before it is notified - // of the update. - if acbw.healthData.connectivityState != connectivity.Ready { - return - } - // Replace the health data to stop sending updates to any previously - // registered health listeners. - hd := newHealthData(connectivity.Ready) - acbw.healthData = hd - if listener == nil { - return - } - - acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { - if ctx.Err() != nil || acbw.ccb.balancer == nil { - return - } - // Don't send updates if a new listener is registered. 
- acbw.healthMu.Lock() - defer acbw.healthMu.Unlock() - curHD := acbw.healthData - if curHD != hd { - return - } - listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) - }) -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 9e9d0806..55bffaa7 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.34.2 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -274,9 +274,11 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GrpcLogEntry) String() string { @@ -287,7 +289,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -438,9 +440,11 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ClientHeader) String() string { @@ -451,7 +455,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -505,9 +509,11 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServerHeader) String() string { @@ -518,7 +524,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -559,9 +565,11 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + 
} } func (x *Trailer) String() string { @@ -572,7 +580,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -630,9 +638,11 @@ type Message struct { func (x *Message) Reset() { *x = Message{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Message) String() string { @@ -643,7 +653,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -703,9 +713,11 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Metadata) String() string { @@ -716,7 +728,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -750,9 +762,11 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *MetadataEntry) String() string { @@ -763,7 +777,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -806,9 +820,11 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Address) String() string { @@ -819,7 +835,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1041,6 +1057,104 @@ func 
file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 4f57b554..19763f8e 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -775,7 +775,10 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - balCfg := cc.sc.lbConfig + var balCfg serviceconfig.LoadBalancingConfig + if cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig + } bw := cc.balancerWrapper cc.mu.Unlock() @@ -1371,7 +1374,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1445,7 +1448,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := internal.HealthCheckFunc + healthCheckFunc := ac.cc.dopts.healthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. 
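The regenerated binarylog.pb.go above only reflects re-running the older generator: protoc-gen-go v1.34.2 keeps the protoimpl.UnsafeEnabled guard around message-state initialization and installs field exporters for the reflection-only (purego) path, while the exported API of the generated package is unchanged. A minimal sketch of that unchanged surface, with arbitrary example field values:

package main

import (
	"fmt"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Only the Reset/ProtoReflect internals differ between generator versions;
	// constructing and marshaling messages works the same either way.
	entry := &binlogpb.GrpcLogEntry{
		CallId: 1,
		Type:   binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: &binlogpb.ClientHeader{MethodName: "/foo.Bar/Baz"},
		},
	}
	b, err := proto.Marshal(entry)
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshaled GrpcLogEntry: %d bytes\n", len(b))
}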
// @@ -1477,7 +1480,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { - err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 959c2f99..e840858b 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.SliceBuffer(data)}, nil + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7494ae59..518692c3 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -87,6 +87,7 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool + healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -444,6 +445,10 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -657,6 +662,16 @@ func WithDisableHealthCheck() DialOption { }) } +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -667,6 +682,7 @@ func defaultDialOptions() dialOptions { BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index ad75313a..1d827dd5 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,7 +23,6 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" - "google.golang.org/grpc/stats" ) func init() { @@ -35,7 +34,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. 
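With v1.68.1 the client reads its health-check function from the dial options (seeded in defaultDialOptions and overridable through the test-only internal.WithHealthCheckFunc hook) instead of calling internal.HealthCheckFunc directly. The sketch below reproduces that functional-option shape in isolation; the type and option names are illustrative stand-ins, not the gRPC internals themselves.

package main

import "fmt"

// healthChecker mirrors the shape of a pluggable health-check function.
type healthChecker func(service string) error

type dialOptions struct {
	healthCheckFunc healthChecker
}

// dialOption mutates dialOptions, the same shape as newFuncDialOption above.
type dialOption func(*dialOptions)

// withHealthCheck overrides the default checker, as the test-only hook does.
func withHealthCheck(f healthChecker) dialOption {
	return func(o *dialOptions) { o.healthCheckFunc = f }
}

func defaultOptions() dialOptions {
	// Mirrors defaultDialOptions seeding healthCheckFunc with the default.
	return dialOptions{healthCheckFunc: func(string) error { return nil }}
}

func main() {
	opts := defaultOptions()
	withHealthCheck(func(service string) error {
		return fmt.Errorf("fake failure for %q", service)
	})(&opts)
	fmt.Println(opts.healthCheckFunc("grpc.health.v1.Health"))
}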
-var DefaultMetrics = stats.NewMetricSet() +var DefaultMetrics = NewMetrics() // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -43,7 +42,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name string + Name Metric // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -155,27 +154,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[string]bool) +var registeredMetrics = make(map[Metric]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. -var metricsRegistry = make(map[string]*MetricDescriptor) +var metricsRegistry = make(map[Metric]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. -func DescriptorForMetric(metricName string) *MetricDescriptor { - return metricsRegistry[metricName] +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] } -func registerMetric(metricName string, def bool) { - if registeredMetrics[metricName] { - logger.Fatalf("metric %v already registered", metricName) +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) } - registeredMetrics[metricName] = true + registeredMetrics[name] = true if def { - DefaultMetrics = DefaultMetrics.Add(metricName) + DefaultMetrics = DefaultMetrics.Add(name) } } @@ -257,8 +256,8 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[string]bool) - metricsRegistry = make(map[string]*MetricDescriptor) + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index bf9e7f98..3221f7a6 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,6 +19,8 @@ // Package stats contains experimental metrics/stats API's. package stats +import "maps" + // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { // RecordInt64Count records the measurement alongside labels on the int @@ -37,3 +39,76 @@ type MetricsRecorder interface { // gauge associated with the provided handle. RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. 
+func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index ed90060c..07df71e9 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,22 +101,6 @@ var severityName = []string{ fatalLog: "FATAL", } -// sprintf is fmt.Sprintf. -// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. -var sprintf = fmt.Sprintf - -// sprint is fmt.Sprint. -// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. -var sprint = fmt.Sprint - -// sprintln is fmt.Sprintln. -// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. -var sprintln = fmt.Sprintln - -// exit is os.Exit. -// This var exists to make it possible to test functions calling os.Exit. -var exit = os.Exit - // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -127,7 +111,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, sevStr+": "+s) + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) return } // TODO: we can also include the logging component, but that needs more @@ -139,79 +123,55 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) printf(severity int, format string, args ...any) { - // Note the discard check is duplicated in each print func, rather than in - // output, to avoid the expensive Sprint calls. - // De-duplicating this by moving to output would be a significant performance regression! 
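The experimental stats package in this version exposes a Metric string type and an immutable Metrics set built with NewMetrics and derived with Add, Join and Remove, replacing the stats.MetricSet used by v1.69.0. A small usage sketch against the vendored package; the metric names are illustrative:

package main

import (
	"fmt"

	estats "google.golang.org/grpc/experimental/stats"
)

func main() {
	// Each derivation returns a new set; the original is left untouched.
	base := estats.NewMetrics("grpc.client.attempt.duration")
	extended := base.Add("grpc.client.call.duration")
	merged := extended.Join(estats.NewMetrics("grpc.server.call.duration"))
	trimmed := merged.Remove("grpc.client.attempt.duration")

	for m := range trimmed.Metrics() {
		fmt.Println(m)
	}
	fmt.Println(len(base.Metrics())) // still 1: base was not mutated
}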
- if lg := g.m[severity]; lg.Writer() == io.Discard { - return - } - g.output(severity, sprintf(format, args...)) -} - -func (g *loggerT) print(severity int, v ...any) { - if lg := g.m[severity]; lg.Writer() == io.Discard { - return - } - g.output(severity, sprint(v...)) -} - -func (g *loggerT) println(severity int, v ...any) { - if lg := g.m[severity]; lg.Writer() == io.Discard { - return - } - g.output(severity, sprintln(v...)) -} - func (g *loggerT) Info(args ...any) { - g.print(infoLog, args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...any) { - g.println(infoLog, args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...any) { - g.printf(infoLog, format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...any) { - g.print(warningLog, args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...any) { - g.println(warningLog, args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...any) { - g.printf(warningLog, format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...any) { - g.print(errorLog, args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...any) { - g.println(errorLog, args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...any) { - g.printf(errorLog, format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...any) { - g.print(fatalLog, args...) - exit(1) + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.println(fatalLog, args...) - exit(1) + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.printf(fatalLog, format, args...) - exit(1) + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { @@ -226,42 +186,19 @@ type LoggerV2Config struct { FormatJSON bool } -// combineLoggers returns a combined logger for both higher & lower severity logs, -// or only one if the other is io.Discard. -// -// This uses io.Discard instead of io.MultiWriter when all loggers -// are set to io.Discard. Both this package and the standard log package have -// significant optimizations for io.Discard, which io.MultiWriter lacks (as of -// this writing). -func combineLoggers(lower, higher io.Writer) io.Writer { - if lower == io.Discard { - return higher - } - if higher == io.Discard { - return lower - } - return io.MultiWriter(lower, higher) -} - // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - - warningW = combineLoggers(infoW, warningW) - errorW = combineLoggers(errorW, warningW) - - fatalW := errorW - - m := []*log.Logger{ - log.New(infoW, "", flag), - log.New(warningW, "", flag), - log.New(errorW, "", flag), - log.New(fatalW, "", flag), - } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
+ m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b6ae7f25..b15cf482 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - rand "math/rand/v2" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 3afc1813..20b4dc3d 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -29,6 +29,8 @@ import ( ) var ( + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -147,20 +149,6 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) - // NewXDSResolverWithClientForTesting creates a new xDS resolver builder - // using the provided xDS client instead of creating a new one using the - // bootstrap configuration specified by the supported environment variables. - // The resolver.Builder is meant to be used in conjunction with the - // grpc.WithResolvers DialOption. The resolver.Builder does not take - // ownership of the provided xDS client and it is the responsibility of the - // caller to close the client when no longer required. - // - // Testing Only - // - // This function should ONLY be used for testing and may not work with some - // other features, including the CSDS service. - NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -267,9 +255,3 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" - -// EnforceSubConnEmbedding is used to enforce proper SubConn implementation -// embedding. -type EnforceSubConnEmbedding interface { - enforceSubConnEmbedding() -} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95..374c12fb 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,9 +24,8 @@ import ( "context" "encoding/json" "fmt" - rand "math/rand/v2" + "math/rand" "net" - "net/netip" "os" "strconv" "strings" @@ -123,7 +122,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. 
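The v1.68.1 logger drops the io.Discard fast paths and the sprint/exit test hooks and simply fans severities out with io.MultiWriter: warning output also reaches the info writer, and error/fatal output reaches all three. A self-contained sketch of that fan-out using in-memory buffers as the writers:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
)

func main() {
	var infoBuf, warnBuf, errBuf bytes.Buffer

	// Index 0 = info, 1 = warning, 2 = error/fatal, as in NewLoggerV2 above.
	loggers := []*log.Logger{
		log.New(&infoBuf, "", log.LstdFlags),
		log.New(io.MultiWriter(&infoBuf, &warnBuf), "", log.LstdFlags),
		log.New(io.MultiWriter(&infoBuf, &warnBuf, &errBuf), "", log.LstdFlags),
	}

	loggers[2].Output(2, "ERROR: something failed")

	// The error line is duplicated into every lower-severity sink.
	fmt.Println(bytes.Contains(infoBuf.Bytes(), []byte("something failed")))
	fmt.Println(bytes.Contains(warnBuf.Bytes(), []byte("something failed")))
	fmt.Println(bytes.Contains(errBuf.Bytes(), []byte("something failed")))
}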
- if ipAddr, err := formatIP(host); err == nil { + if ipAddr, ok := formatIP(host); ok { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -261,9 +260,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, err := formatIP(a) - if err != nil { - return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -323,9 +322,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, err := formatIP(a) - if err != nil { - return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -352,19 +351,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns an error if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and error = nil. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// error = nil. -func formatIP(addr string) (string, error) { - ip, err := netip.ParseAddr(addr) - if err != nil { - return "", err +// ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false } - if ip.Is4() { - return addr, nil + if ip.To4() != nil { + return addr, true } - return "[" + addr + "]", nil + return "[" + addr + "]", true } // parseTarget takes the user input target string and default port, returns @@ -380,7 +379,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if _, err := netip.ParseAddr(target); err == nil { + if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -428,7 +427,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.IntN(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go deleted file mode 100644 index 8ed347c5..00000000 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ /dev/null @@ -1,144 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
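The DNS resolver returns to net.ParseIP and a (string, ok) result for formatIP rather than net/netip with an error. The standalone sketch below mirrors that behavior, including the square-bracket wrapping of IPv6 literals:

package main

import (
	"fmt"
	"net"
)

// formatIP mirrors the resolver helper: ok is false for non-IP input, an IPv4
// literal is returned unchanged, and an IPv6 literal is wrapped in brackets.
func formatIP(addr string) (string, bool) {
	ip := net.ParseIP(addr)
	if ip == nil {
		return "", false
	}
	if ip.To4() != nil {
		return addr, true
	}
	return "[" + addr + "]", true
}

func main() {
	for _, a := range []string{"10.0.0.1", "2001:db8::1", "not-an-ip"} {
		ip, ok := formatIP(a)
		fmt.Printf("%q -> %q ok=%v\n", a, ip, ok)
	}
}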
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "sync/atomic" - - "golang.org/x/net/http2" - "google.golang.org/grpc/mem" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// ClientStream implements streaming functionality for a gRPC client. -type ClientStream struct { - *Stream // Embed for common stream functionality. - - ct *http2Client - done chan struct{} // closed at the end of stream to unblock writers. - doneFunc func() // invoked at the end of stream. - - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). - headerValid bool - header metadata.MD // the received header metadata - noHeaders bool // set if the client never received headers (set only after the stream is done). - - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream - - status *status.Status // the status error received from the server -} - -// Read reads an n byte message from the input stream. -func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { - b, err := s.Stream.read(n) - if err == nil { - s.ct.incrMsgRecv() - } - return b, err -} - -// Close closes the stream and popagates err to any readers. -func (s *ClientStream) Close(err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -// Write writes the hdr and data bytes to the output stream. -func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { - return s.ct.write(s, hdr, data, opts) -} - -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *ClientStream) BytesReceived() bool { - return s.bytesReceived.Load() -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *ClientStream) Unprocessed() bool { - return s.unprocessed.Load() -} - -func (s *ClientStream) waitOnHeader() { - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.Close(ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *ClientStream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *ClientStream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. Acquires the key-value -// pairs of header metadata once it is available. It blocks until i) the -// metadata is ready or ii) there is no header metadata or iii) the stream is -// canceled/expired. 
-func (s *ClientStream) Header() (metadata.MD, error) { - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. -func (s *ClientStream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *ClientStream) Status() *status.Status { - return s.status -} diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index dfc0f224..97198c51 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -92,11 +92,14 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked < f.limit/4 { + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 f.updateEffectiveWindowSize() - return 0 + return w } - return f.reset() + f.updateEffectiveWindowSize() + return 0 } func (f *trInFlow) reset() uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index d9305a65..ce878693 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error { +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). 
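The flow-control change restores onData's earlier shape: received bytes accumulate until they reach a quarter of the connection window, and only then is a window update released. A minimal standalone model of that accounting (field names mirror the diff):

package main

import "fmt"

// trInFlow models the transport-level inbound flow-control accounting.
type trInFlow struct {
	limit   uint32
	unacked uint32
}

// onData records n received bytes and returns the window update to send;
// it is non-zero only once unacked bytes reach a quarter of the limit.
func (f *trInFlow) onData(n uint32) uint32 {
	f.unacked += n
	if f.unacked >= f.limit/4 {
		w := f.unacked
		f.unacked = 0
		return w
	}
	return 0
}

func main() {
	f := &trInFlow{limit: 64 * 1024}
	fmt.Println(f.onData(8 * 1024)) // 0: below the 16 KiB threshold
	fmt.Println(f.onData(8 * 1024)) // 16384: threshold reached, update released
}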
-func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.Bu return nil } -func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,18 +408,16 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &ServerStream{ - Stream: &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, - buf: newRecvBuffer(), - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - }, + s := &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, cancel: cancel, + buf: newRecvBuffer(), st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ @@ -473,7 +471,9 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) incrMsgRecv() {} +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f323ab7f..62b81885 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -123,7 +123,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*ClientStream + activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -199,10 +199,10 @@ func isTemporary(err error) bool { return true } -// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -339,7 +339,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*ClientStream), + activeStreams: make(map[uint32]*Stream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -480,19 +480,17 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
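The serverHandlerTransport edits above restore the v1.68.1 method set (WriteStatus, WriteHeader, Write on *Stream); this transport is what serves gRPC when *grpc.Server is used as an http.Handler. A hedged sketch of that serving mode, using h2c so HTTP/2 works without TLS for local experimentation; the listen address is illustrative:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// Register services on gs here.

	// *grpc.Server implements http.Handler; requests arriving this way are
	// served through the serverHandlerTransport patched above.
	handler := h2c.NewHandler(gs, &http2.Server{})
	log.Fatal(http.ListenAndServe("localhost:8080", handler))
}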
- s := &ClientStream{ - Stream: &Stream{ - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - contentSubtype: callHdr.ContentSubtype, - }, - ct: t, - done: make(chan struct{}), - headerChan: make(chan struct{}), - doneFunc: callHdr.DoneFunc, + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -508,7 +506,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - s.Close(err) + t.CloseStream(s, err) }, }, windowHandler: func(n int) { @@ -599,6 +597,12 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -734,7 +738,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -759,7 +763,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return } // The stream was unprocessed by the server. - s.unprocessed.Store(true) + atomic.StoreUint32(&s.unprocessed, 1) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -904,7 +908,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return s, nil } -func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1067,7 +1085,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
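createHeaderFields again propagates census payloads from the context: anything attached with stats.SetTags / stats.SetTrace is emitted as the grpc-tags-bin / grpc-trace-bin request headers. A small client-side sketch (the payload bytes are arbitrary); the resulting context would be passed to an RPC call:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	ctx := context.Background()
	// Attach census-style payloads; the client transport encodes them as the
	// grpc-tags-bin and grpc-trace-bin request headers.
	ctx = stats.SetTags(ctx, []byte("example-tags"))
	ctx = stats.SetTrace(ctx, []byte("example-trace"))

	// The transport reads them back with stats.OutgoingTags / OutgoingTrace.
	fmt.Println(stats.OutgoingTags(ctx) != nil, stats.OutgoingTrace(ctx) != nil)
}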
-func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { reader := data.Reader() if opts.Last { @@ -1096,11 +1114,10 @@ func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, o _ = reader.Close() return err } - t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *ClientStream { +func (t *http2Client) getStream(f http2.Frame) *Stream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1110,7 +1127,7 @@ func (t *http2Client) getStream(f http2.Frame) *ClientStream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { +func (t *http2Client) adjustWindow(s *Stream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1119,7 +1136,7 @@ func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *ClientStream, n uint32) { +func (t *http2Client) updateWindow(s *Stream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1225,7 +1242,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - s.unprocessed.Store(true) + atomic.StoreUint32(&s.unprocessed, 1) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1366,11 +1383,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*ClientStream, 0) + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. 
- stream.unprocessed.Store(true) + atomic.StoreUint32(&stream.unprocessed, 1) streamsToClose = append(streamsToClose, stream) } } @@ -1422,7 +1439,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - s.bytesReceived.Store(true) + atomic.StoreUint32(&s.bytesReceived, 1) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1792,18 +1809,14 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) incrMsgSent() { - if channelz.IsOn() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) - } +func (t *http2Client) IncrMsgSent() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } -func (t *http2Client) incrMsgRecv() { - if channelz.IsOn() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) - } +func (t *http2Client) IncrMsgRecv() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0055fddd..584b50fe 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - rand "math/rand/v2" + "math/rand" "net" "net/http" "strconv" @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*ServerStream + activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*ServerStream), + activeStreams: make(map[uint32]*Stream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
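IncrMsgSent and IncrMsgRecv become exported methods that update the channelz socket counters unconditionally, without the channelz.IsOn() guard. Those counters are visible once the channelz service is registered; a hedged server sketch, assuming google.golang.org/grpc/channelz/service is available to the build (the address is illustrative):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	channelzsvc "google.golang.org/grpc/channelz/service"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Expose channelz so the per-socket MessagesSent / MessagesReceived
	// counters bumped by IncrMsgSent / IncrMsgRecv can be inspected with
	// tools such as grpcdebug.
	channelzsvc.RegisterChannelzServiceToServer(s)
	log.Fatal(s.Serve(lis))
}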
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,13 +385,11 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &ServerStream{ - Stream: &Stream{ - id: streamID, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - }, + s := &Stream{ + id: streamID, st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -539,6 +537,12 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } } t.mu.Lock() if t.state != reachable { @@ -630,7 +634,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -694,7 +698,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre } } -func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -712,7 +716,7 @@ func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { +func (t *http2Server) adjustWindow(s *Stream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -722,7 +726,7 @@ func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *ServerStream, n uint32) { +func (t *http2Server) updateWindow(s *Stream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -959,7 +963,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *ServerStream) error { +func (t *http2Server) streamContextErr(s *Stream) error { select { case <-t.done: return ErrConnClosing @@ -969,7 +973,7 @@ func (t *http2Server) streamContextErr(s *ServerStream) error { } // WriteHeader sends the header metadata md back to the client. 
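On the server side, operateHeaders again decodes grpc-tags-bin / grpc-trace-bin into the stream context via stats.SetIncomingTags / stats.SetIncomingTrace, where handlers and stats handlers can read them back with stats.Tags / stats.Trace. A small sketch of that read path (the simulated header bytes are arbitrary):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/stats"
)

// logCensusPayloads could run inside a handler or stats handler; Tags and
// Trace return whatever the server transport decoded from grpc-tags-bin and
// grpc-trace-bin, or nil when the headers were absent.
func logCensusPayloads(ctx context.Context) {
	if b := stats.Tags(ctx); b != nil {
		fmt.Printf("grpc-tags-bin: %q\n", b)
	}
	if b := stats.Trace(ctx); b != nil {
		fmt.Printf("grpc-trace-bin: %q\n", b)
	}
}

func main() {
	// Simulate what operateHeaders does with an incoming header value.
	ctx := stats.SetIncomingTags(context.Background(), []byte("example-tags"))
	logCensusPayloads(ctx)
}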
-func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { +func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1002,7 +1006,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *ServerStream) error { +func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1042,7 +1046,7 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1113,11 +1117,11 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.writeHeader(s, nil); err != nil { + if err := t.WriteHeader(s, nil); err != nil { _ = reader.Close() return err } @@ -1143,7 +1147,6 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ _ = reader.Close() return err } - t.incrMsgSent() return nil } @@ -1273,7 +1276,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1294,7 +1297,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1318,7 +1321,7 @@ func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrC } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. 
@@ -1412,18 +1415,14 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) incrMsgSent() { - if channelz.IsOn() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) - } +func (t *http2Server) IncrMsgSent() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) } -func (t *http2Server) incrMsgRecv() { - if channelz.IsOn() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) - } +func (t *http2Server) IncrMsgRecv() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) } func (t *http2Server) getOutFlowWindow() int64 { @@ -1456,7 +1455,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rand.Int64N(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go deleted file mode 100644 index a22a9015..00000000 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "context" - "errors" - "strings" - "sync" - "sync/atomic" - - "google.golang.org/grpc/mem" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// ServerStream implements streaming functionality for a gRPC server. -type ServerStream struct { - *Stream // Embed for common stream functionality. - - st internalServerTransport - ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) - cancel context.CancelFunc // invoked at the end of stream to cancel ctx. - - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. - clientAdvertisedCompressors string - headerWireLength int - - // hdrMu protects outgoing header and trailer metadata. - hdrMu sync.Mutex - header metadata.MD // the outgoing header metadata. Updated by WriteHeader. - headerSent atomic.Bool // atomically set when the headers are sent out. -} - -// Read reads an n byte message from the input stream. -func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { - b, err := s.Stream.read(n) - if err == nil { - s.st.incrMsgRecv() - } - return b, err -} - -// SendHeader sends the header metadata for the given stream. -func (s *ServerStream) SendHeader(md metadata.MD) error { - return s.st.writeHeader(s, md) -} - -// Write writes the hdr and data bytes to the output stream. -func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { - return s.st.write(s, hdr, data, opts) -} - -// WriteStatus sends the status of a stream to the client. 
WriteStatus is -// the final call made on a stream and always occurs. -func (s *ServerStream) WriteStatus(st *status.Status) error { - return s.st.writeStatus(s, st) -} - -// isHeaderSent indicates whether headers have been sent. -func (s *ServerStream) isHeaderSent() bool { - return s.headerSent.Load() -} - -// updateHeaderSent updates headerSent and returns true -// if it was already set. -func (s *ServerStream) updateHeaderSent() bool { - return s.headerSent.Swap(true) -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *ServerStream) RecvCompress() string { - return s.recvCompress -} - -// SendCompress returns the send compressor name. -func (s *ServerStream) SendCompress() string { - return s.sendCompress -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *ServerStream) ContentSubtype() string { - return s.contentSubtype -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *ServerStream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *ServerStream) SetContext(ctx context.Context) { - s.ctx = ctx -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *ServerStream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Header returns the header metadata of the stream. It returns the out header -// after t.WriteHeader is called. It does not block and must not be called -// until after WriteHeader. -func (s *ServerStream) Header() (metadata.MD, error) { - // Return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. -func (s *ServerStream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// This should not be called in parallel to other data writes. -func (s *ServerStream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. -// This should not be called parallel to other data writes. 
-func (s *ServerStream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 2859b877..e12cb0bc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,6 +27,7 @@ import ( "fmt" "io" "net" + "strings" "sync" "sync/atomic" "time" @@ -38,6 +39,7 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -131,7 +133,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -140,9 +142,9 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readMessageHeaderClient(header) + n, r.err = r.readHeaderClient(header) } else { - n, r.err = r.readMessageHeader(header) + n, r.err = r.readHeader(header) } return n, r.err } @@ -172,12 +174,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readMessageHeaderAdditional(m, header) + return r.readHeaderAdditional(m, header) } } @@ -190,7 +192,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -211,9 +213,9 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readMessageHeaderAdditional(m, header) + return r.readHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readMessageHeaderAdditional(m, header) + return r.readHeaderAdditional(m, header) } } @@ -244,7 +246,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -286,8 +288,14 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { id uint32 - ctx context.Context // the associated context of the stream - method string // the associated RPC method of the stream + st ServerTransport // nil for client side Stream + ct ClientTransport // nil for server side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -295,17 +303,58 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool + headerWireLength int // Only set on server side. + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + state streamState + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string +} - trailer metadata.MD // the key-value map of trailer metadata. +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 } func (s *Stream) swapState(st streamState) streamState { @@ -320,12 +369,110 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } +func (s *Stream) waitOnHeader() { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. 
+ return + } + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. +func (s *Stream) Header() (metadata.MD, error) { + if s.headerChan == nil { + // On server side, return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil + } + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. Client-side only. +func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. +// after the entire stream is done, it could return an empty MD. Client +// side only. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - return s.trailer.Copy() + c := s.trailer.Copy() + return c +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype } // Context returns the context of the stream. 
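The Header/Trailer contract spelled out in the comments above is what callers see through grpc.ClientStream. A minimal sketch of that usage, assuming a stream obtained elsewhere; the function name is a placeholder, not anything defined by this patch:

package example

import (
	"log"

	"google.golang.org/grpc"
)

// dumpStreamMetadata illustrates the documented contract: Header blocks until
// header metadata arrives (or the call ends trailers-only), while Trailer is
// only meaningful after the stream has finished with io.EOF.
func dumpStreamMetadata(cs grpc.ClientStream) {
	md, err := cs.Header()
	if err != nil {
		log.Printf("no header metadata: %v", err)
		return
	}
	log.Printf("header metadata: %v", md)

	// ... exchange messages until RecvMsg returns io.EOF ...

	log.Printf("trailer metadata: %v", cs.Trailer())
}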
@@ -333,31 +480,90 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *Stream) Status() *status.Status { + return s.status +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +// This should not be called in parallel to other data writes. +func (s *Stream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *Stream) SendHeader(md metadata.MD) error { + return s.st.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +// This should not be called parallel to other data writes. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} + func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// ReadMessageHeader reads data into the provided header slice from the stream. -// It first checks if there was an error during a previous read operation and +// ReadHeader reads data into the provided header slice from the stream. It +// first checks if there was an error during a previous read operation and // returns it if present. It then requests a read operation for the length of // the header. It continues to read from the stream until the entire header -// slice is filled or an error occurs. If an `io.EOF` error is encountered with -// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an -// unexpected end of the stream. The method returns any error encountered during -// the read process or nil if the header was successfully read. -func (s *Stream) ReadMessageHeader(header []byte) (err error) { +// slice is filled or an error occurs. If an `io.EOF` error is encountered +// with partially read data, it is converted to `io.ErrUnexpectedEOF` to +// indicate an unexpected end of the stream. The method returns any error +// encountered during the read process or nil if the header was successfully +// read. 
+func (s *Stream) ReadHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadMessageHeader(header) + n, err := s.trReader.ReadHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -373,7 +579,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. -func (s *Stream) read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -413,8 +619,8 @@ type transportReader struct { er error } -func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { - n, err := t.reader.ReadMessageHeader(header) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err return 0, err @@ -433,6 +639,17 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *Stream) Unprocessed() bool { + return atomic.LoadUint32(&s.unprocessed) == 1 +} + // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -508,9 +725,15 @@ type ConnectOptions struct { BufferPool mem.BufferPool } -// WriteOptions provides additional hints and information for message +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) +} + +// Options provides additional hints and information for message // transmission. -type WriteOptions struct { +type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -559,8 +782,18 @@ type ClientTransport interface { // It does not block. GracefulClose() + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -580,6 +813,12 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. 
+ IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -589,7 +828,19 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*ServerStream)) + HandleStreams(context.Context, func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -601,14 +852,12 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) -} -type internalServerTransport interface { - ServerTransport - writeHeader(s *ServerStream, md metadata.MD) error - write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error - writeStatus(s *ServerStream, st *status.Status) error - incrMsgRecv() + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 65002e2c..228e9c2f 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,11 +22,6 @@ import ( "io" ) -const ( - // 32 KiB is what io.Copy uses. - readAllBufSize = 32 * 1024 -) - // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -224,58 +219,8 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is -// added to the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -// -// Important: A failed call returns a non-nil error and may also return -// partially read buffers. It is the responsibility of the caller to free the -// BufferSlice returned, or its memory will not be reused. 
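With mem.ReadAll removed in the downgraded module, code that needs a BufferSlice from an arbitrary io.Reader falls back to the writer-based pattern that rpc_util.go switches to further down. A compilable sketch under that assumption; the helper name is invented and the pool is supplied by the caller:

package example

import (
	"io"

	"google.golang.org/grpc/mem"
)

// readAllToSlice copies everything from r into a BufferSlice using buffers
// drawn from pool. The caller owns the result and must Free it, mirroring the
// contract of the removed mem.ReadAll helper.
func readAllToSlice(r io.Reader, pool mem.BufferPool) (mem.BufferSlice, error) {
	var out mem.BufferSlice
	if _, err := io.Copy(mem.NewWriter(&out, pool), r); err != nil {
		out.Free() // release any partially copied buffers
		return nil, err
	}
	return out, nil
}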
-func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { - var result BufferSlice - if wt, ok := r.(io.WriterTo); ok { - // This is more optimal since wt knows the size of chunks it wants to - // write and, hence, we can allocate buffers of an optimal size to fit - // them. E.g. might be a single big chunk, and we wouldn't chop it - // into pieces. - w := NewWriter(&result, pool) - _, err := wt.WriteTo(w) - return result, err - } -nextBuffer: - for { - buf := pool.Get(readAllBufSize) - // We asked for 32KiB but may have been given a bigger buffer. - // Use all of it if that's the case. - *buf = (*buf)[:cap(*buf)] - usedCap := 0 - for { - n, err := r.Read((*buf)[usedCap:]) - usedCap += n - if err != nil { - if usedCap == 0 { - // Nothing in this buf, put it back - pool.Put(buf) - } else { - *buf = (*buf)[:usedCap] - result = append(result, NewBuffer(buf, pool)) - } - if err == io.EOF { - err = nil - } - return result, err - } - if len(*buf) == usedCap { - result = append(result, NewBuffer(buf, pool)) - continue nextBuffer - } - } - } -} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index ee0ff969..e87a17f3 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index 58019722..e1f58104 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.34.2 // protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto @@ -64,9 +64,11 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServerReflectionRequest) String() string { @@ -77,7 +79,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -206,9 +208,11 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ExtensionRequest) String() string { @@ -219,7 +223,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -270,9 +274,11 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServerReflectionResponse) String() string { @@ -283,7 +289,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -402,9 +408,11 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FileDescriptorResponse) String() string { @@ -415,7 +423,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -452,9 +460,11 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ExtensionNumberResponse) String() string { @@ -465,7 +475,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -507,9 +517,11 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListServiceResponse) String() string { @@ -520,7 +532,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -556,9 +568,11 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServiceResponse) String() string { @@ -569,7 +583,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -604,9 +618,11 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ErrorResponse) String() string { @@ -617,7 +633,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -805,6 +821,104 @@ func 
file_grpc_reflection_v1_reflection_proto_init() { if File_grpc_reflection_v1_reflection_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 4d7b654e..0582e16a 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.34.2 // protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
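Both regenerated files back the server reflection service, which applications normally enable with a single registration call rather than by touching these generated types. A minimal sketch, assuming a plain grpc.Server; the listener address and function name are placeholders:

package example

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

// serveWithReflection registers the reflection service whose generated code
// (grpc_reflection_v1 and grpc_reflection_v1alpha) is patched in this diff.
func serveWithReflection() {
	lis, err := net.Listen("tcp", "localhost:50051") // placeholder address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	reflection.Register(srv) // serves the reflection APIs to tools like grpcurl
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}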
@@ -64,9 +64,11 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServerReflectionRequest) String() string { @@ -77,7 +79,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -227,9 +229,11 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ExtensionRequest) String() string { @@ -240,7 +244,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -297,9 +301,11 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServerReflectionResponse) String() string { @@ -310,7 +316,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -447,9 +453,11 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FileDescriptorResponse) String() string { @@ -460,7 +468,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -503,9 +511,11 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ExtensionNumberResponse) String() string { @@ -516,7 +526,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -564,9 +574,11 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListServiceResponse) String() string { @@ -577,7 +589,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -618,9 +630,11 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServiceResponse) String() string { @@ -631,7 +645,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -672,9 +686,11 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ErrorResponse) String() string { @@ -685,7 +701,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -880,6 +896,104 @@ func 
file_grpc_reflection_v1alpha_reflection_proto_init() { if File_grpc_reflection_v1alpha_reflection_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 8eb1cf3b..20285451 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,7 +22,6 @@ package resolver import ( "context" - "errors" "fmt" "net" "net/url" @@ -238,8 +237,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn then forwards this error to the load balancing - // policy. + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -331,20 +330,3 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. 
OverrideAuthority(Target) string } - -// ValidateEndpoints validates endpoints from a petiole policy's perspective. -// Petiole policies should call this before calling into their children. See -// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) -// for details. -func ValidateEndpoints(endpoints []Endpoint) error { - if len(endpoints) == 0 { - return errors.New("endpoints list is empty") - } - - for _, endpoint := range endpoints { - for range endpoint.Addresses { - return nil - } - } - return errors.New("endpoints list contains no addresses") -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 9fac2b08..aba1ae3e 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadMessageHeader(header []byte) error + ReadHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadMessageHeader(p.header[:]) + err := p.r.ReadHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,6 +664,9 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) + if length == 0 { + return pf, nil, nil + } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -814,7 +817,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -838,7 +841,7 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM var uncompressedBuf []byte uncompressedBuf, err = dc.Do(compressed.Reader()) if err == nil { - out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} } size = len(uncompressedBuf) } else { @@ -874,7 +877,30 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return nil, 0, err } - out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? 
+ //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) if err != nil { out.Free() return nil, 0, err @@ -882,14 +908,10 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } -type recvCompressor interface { - RecvCompress() string -} - // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 16065a02..d1e1415a 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -87,13 +87,12 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -// MethodHandler is a function type that processes a unary RPC method call. -type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler MethodHandler + Handler methodHandler } // ServiceDesc represents an RPC service's specification. @@ -622,8 +621,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.ServerStream channel forever and waits -// for data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). 
// @@ -1021,7 +1020,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.ServerStream) { + st.HandleStreams(ctx, func(stream *transport.Stream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1137,7 +1136,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1166,7 +1165,7 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } - err = stream.Write(hdr, payload, opts) + err = t.Write(stream, hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1213,7 +1212,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1321,7 +1320,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - stream.WriteStatus(st) + t.WriteStatus(stream, st) return st.Err() } } @@ -1355,12 +1354,15 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := stream.WriteStatus(status.Convert(err)); e != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } defer d.Free() + if channelz.IsOn() { + t.IncrMsgRecv() + } df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) @@ -1402,7 +1404,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := stream.WriteStatus(appStatus); e != nil { + if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1429,20 +1431,20 @@ func (s *Server) processUnaryRPC(ctx 
context.Context, stream *transport.ServerSt if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.WriteOptions{Last: true} + opts := &transport.Options{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { - if e := stream.WriteStatus(sts); e != nil { + if e := t.WriteStatus(stream, sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1482,6 +1484,9 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt binlog.Log(ctx, sm) } } + if channelz.IsOn() { + t.IncrMsgSent() + } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1497,7 +1502,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt binlog.Log(ctx, st) } } - return stream.WriteStatus(statusOK) + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1536,7 +1541,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1556,6 +1561,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, + t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1642,7 +1648,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - ss.s.WriteStatus(st) + t.WriteStatus(ss.s, st) return st.Err() } } @@ -1711,7 +1717,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv binlog.Log(ctx, st) } } - ss.s.WriteStatus(appStatus) + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1729,10 +1735,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv binlog.Log(ctx, st) } } - return ss.s.WriteStatus(statusOK) + return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1762,7 +1768,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1777,20 +1783,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser service := sm[:pos] method := sm[pos+1:] - // FromIncomingContext is expensive: skip if there are no statsHandlers - if len(s.opts.statsHandlers) > 0 { - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1799,17 +1802,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, stream, srv, md, ti) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, stream, srv, sd, ti) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1822,7 +1825,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -2097,7 +2100,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
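SetSendCompressor (continued just below) keeps its public signature; only the concrete transport stream type it asserts changes in this patch. A hedged sketch of a unary handler using it, with the request and response types standing in for generated protobuf messages:

package example

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// Placeholder message types standing in for generated protobuf messages.
type pingRequest struct{}
type pingResponse struct{}

// handlePing switches the outbound compressor per call; the error is returned
// if the client did not advertise support for the requested compressor.
func handlePing(ctx context.Context, _ *pingRequest) (*pingResponse, error) {
	if err := grpc.SetSendCompressor(ctx, "gzip"); err != nil {
		return nil, err
	}
	return &pingResponse{}, nil
}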
func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2119,7 +2122,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 7e83027d..2671c5ef 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,7 +168,6 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } - func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -298,7 +297,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func minPointers(a, b *int) *int { +func min(a, b *int) *int { if *a < *b { return a } @@ -310,7 +309,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return minPointers(mcMax, doptMax) + return min(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go deleted file mode 100644 index 641c8e97..00000000 --- a/vendor/google.golang.org/grpc/stats/metrics.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package stats - -import "maps" - -// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetricSet instead. -type MetricSet struct { - // metrics are the set of metrics to initialize. - metrics map[string]bool -} - -// NewMetricSet returns a MetricSet containing metricNames. -func NewMetricSet(metricNames ...string) *MetricSet { - newMetrics := make(map[string]bool) - for _, metric := range metricNames { - newMetrics[metric] = true - } - return &MetricSet{metrics: newMetrics} -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. 
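Reviewer note: the service_config.go hunk restores the package-level helper name `min`, which v1.69 had renamed to `minPointers` — apparently so the same package could use Go 1.21's built-in `min` elsewhere (see the backoff hunk in stream.go below); treat that rationale as an inference. A self-contained sketch of the same smaller-of-two-`*int` pattern, under a hypothetical name to avoid the naming question entirely:

```go
package main

import "fmt"

// lesserPtr is a hypothetical stand-in for the min/minPointers helper shown
// in the diff: compare the pointed-to values and return the smaller one's
// pointer, as getMaxSize does when both a per-method and a dial-option limit
// are set.
func lesserPtr(a, b *int) *int {
	if *a < *b {
		return a
	}
	return b
}

func main() {
	mcMax, doptMax := 4096, 1<<20
	fmt.Println(*lesserPtr(&mcMax, &doptMax)) // 4096: the tighter limit wins
}
```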
-func (m *MetricSet) Metrics() map[string]bool { - return m.metrics -} - -// Add adds the metricNames to the metrics set and returns a new copy with the -// additional metrics. -func (m *MetricSet) Add(metricNames ...string) *MetricSet { - newMetrics := make(map[string]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metricNames { - newMetrics[metric] = true - } - return &MetricSet{metrics: newMetrics} -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. -func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { - newMetrics := make(map[string]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &MetricSet{metrics: newMetrics} -} - -// Remove removes the metricNames from the metrics set and returns a new copy -// with the metrics removed. -func (m *MetricSet) Remove(metricNames ...string) *MetricSet { - newMetrics := make(map[string]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metricNames { - delete(newMetrics, metric) - } - return &MetricSet{metrics: newMetrics} -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 6f20d2d5..71195c49 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -260,42 +260,84 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// Deprecated: set the `grpc-tags-bin` header in the metadata instead. +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. func SetTags(ctx context.Context, b []byte) context.Context { - return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) + return context.WithValue(ctx, outgoingTagsKey{}, b) } // Tags returns the tags from the context for the inbound RPC. // -// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. func Tags(ctx context.Context) []byte { - traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") - if len(traceValues) == 0 { - return nil - } - return []byte(traceValues[len(traceValues)-1]) + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. 
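Reviewer note: stats/metrics.go is deleted outright because `MetricSet` only exists in grpc v1.69.0 and later. For reference, this is how the copy-on-write API shown in the removed file is used there; the snippet will not compile against the v1.68.1 tree being vendored here, and the metric names are just examples.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/stats" // MetricSet requires grpc >= v1.69.0
)

func main() {
	base := stats.NewMetricSet("grpc.client.attempt.duration")
	// Add and Remove return modified copies; base itself is never mutated.
	extended := base.Add("grpc.client.call.duration")
	trimmed := extended.Remove("grpc.client.attempt.duration")

	fmt.Println(len(base.Metrics()), len(extended.Metrics()), len(trimmed.Metrics())) // 1 2 1
}
```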
+func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b } +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// Deprecated: set the `grpc-trace-bin` header in the metadata instead. +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. func SetTrace(ctx context.Context, b []byte) context.Context { - return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) + return context.WithValue(ctx, outgoingTraceKey{}, b) } // Trace returns the trace from the context for the inbound RPC. // -// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. func Trace(ctx context.Context) []byte { - traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") - if len(traceValues) == 0 { - return nil - } - return []byte(traceValues[len(traceValues)-1]) + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 17e2267b..bb2b2a21 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - rand "math/rand/v2" + "math/rand" "strconv" "sync" "time" @@ -113,9 +113,7 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. For unary or server-streaming - // RPCs (StreamDesc.ClientStreams is false), a nil error is returned - // unconditionally. + // the stream may be discovered using RecvMsg. // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -218,7 +216,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) 
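Reviewer note: both sides of the stats.go hunk keep `SetTags`/`SetTrace`, but they differ in plumbing — v1.68.1 stashes the bytes under private context keys (with the transport expected to copy them into the `grpc-tags-bin` / `grpc-trace-bin` headers at send time), while v1.69 appends those headers directly via metadata and deprecates the helpers. Caller-visible usage, sketched below with a made-up tag payload:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/stats"
)

func main() {
	ctx := context.Background()

	// Option A: stats.SetTags, which both versions still accept. Under
	// v1.68.1 the bytes travel via a context value and are injected by the
	// transport, so they do not appear in the outgoing metadata object here.
	ctx = stats.SetTags(ctx, []byte("tenant=42"))

	// Option B: set the header explicitly, which is what the v1.69
	// implementation does internally and what its deprecation notice suggests.
	ctx = metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", "tenant=42")

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get("grpc-tags-bin"))
}
```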
} @@ -586,7 +584,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport - s *transport.ClientStream + s *transport.Stream p *parser pickResult balancer.PickResult @@ -708,10 +706,11 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) - // Apply jitter by multiplying with a random factor between 0.8 and 1.2 - cur *= 0.8 + 0.4*rand.Float64() - dur = time.Duration(int64(cur)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } @@ -992,7 +991,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1084,7 +1083,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1098,6 +1097,9 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } + if channelz.IsOn() { + a.t.IncrMsgSent() + } return nil } @@ -1151,6 +1153,9 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1178,7 +1183,7 @@ func (a *csAttempt) finish(err error) { } var tr metadata.MD if a.s != nil { - a.s.Close(err) + a.t.CloseStream(a.s, err) tr = a.s.Trailer() } @@ -1335,7 +1340,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.ClientStream + s *transport.Stream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1375,7 +1380,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1425,7 +1430,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
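Reviewer note: the shouldRetry hunk above reverts the backoff math — v1.68.1 grows `InitialBackoff` by `BackoffMultiplier^retries`, caps it at `MaxBackoff`, then sleeps a uniformly random duration in `[0, cur)` via `rand.Int63n` ("full jitter"), whereas v1.69 keeps `cur` and applies a ±20% jitter factor. A worked sketch of the reverted behavior with made-up parameter values:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// fullJitterBackoff is a hypothetical helper mirroring the v1.68.1 logic in
// shouldRetry: exponential growth, a hard cap, then a uniform draw in [0, cur).
func fullJitterBackoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(retries))
	if m := float64(maxBackoff); cur > m {
		cur = m
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

func main() {
	for n := 0; n < 4; n++ {
		d := fullJitterBackoff(100*time.Millisecond, 2*time.Second, 2.0, n)
		fmt.Printf("attempt %d: sleep %v\n", n+1, d)
	}
}
```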
%d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1435,6 +1440,9 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } + if channelz.IsOn() { + as.t.IncrMsgSent() + } return nil } @@ -1472,6 +1480,9 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } + if channelz.IsOn() { + as.t.IncrMsgRecv() + } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1499,7 +1510,7 @@ func (as *addrConnStream) finish(err error) { err = nil } if as.s != nil { - as.s.Close(err) + as.t.CloseStream(as.s, err) } if err != nil { @@ -1566,7 +1577,8 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { ctx context.Context - s *transport.ServerStream + t transport.ServerTransport + s *transport.Stream p *parser codec baseCodec @@ -1616,7 +1628,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.s.SendHeader(md) + err = ss.t.WriteHeader(ss.s, md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1656,7 +1668,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.s.WriteStatus(st) + ss.t.WriteStatus(ss.s, st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1664,6 +1676,9 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } }() // Server handler could have set new compressor by calling SetSendCompressor. @@ -1695,7 +1710,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } @@ -1741,7 +1756,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.s.WriteStatus(st) + ss.t.WriteStatus(ss.s, st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1749,6 +1764,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. 
} + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index a5b03882..5a47094a 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.69.0-dev" +const Version = "1.68.1" diff --git a/vendor/modules.txt b/vendor/modules.txt index dd13e1d2..57e62d24 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -175,7 +175,7 @@ golang.org/x/text/unicode/norm # google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.69.0 +# google.golang.org/grpc v1.68.1 ## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes
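Reviewer note: the version.go and vendor/modules.txt hunks are the bookkeeping side of the downgrade. Assuming the repository follows the standard `go mod vendor` workflow (an assumption, not something this diff states), the whole change is reproduced and verified with the usual toolchain commands rather than by hand-editing vendored files:

```
go get google.golang.org/grpc@v1.68.1
go mod tidy
go mod vendor
```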