Upgrade to google.golang.org/grpc v1.66.2 / modify certain protobuf messages to retain their unmarshaling buffer #9401

Merged: 16 commits, Oct 22, 2024
6 changes: 1 addition & 5 deletions go.mod
@@ -49,7 +49,7 @@ require (
golang.org/x/net v0.30.0
golang.org/x/sync v0.8.0
golang.org/x/time v0.6.0
google.golang.org/grpc v1.66.0
google.golang.org/grpc v1.66.2
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -316,7 +316,3 @@ replace github.com/prometheus/alertmanager => github.com/grafana/prometheus-aler
// - https://github.com/grafana/franz-go/pull/3
// - https://github.com/grafana/franz-go/pull/4
replace github.com/twmb/franz-go => github.com/grafana/franz-go v0.0.0-20241009100846-782ba1442937

// Pin Google GRPC to v1.65.0 as v1.66.0 has API changes and also potentially performance regressions.
// Following https://github.com/grafana/dskit/pull/581
replace google.golang.org/grpc => google.golang.org/grpc v1.65.0
1,182 changes: 61 additions & 1,121 deletions go.sum

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions pkg/distributor/distributor.go
@@ -1384,6 +1384,7 @@ func NextOrCleanup(next PushFunc, pushReq *Request) (_ PushFunc, maybeCleanup fu
func (d *Distributor) Push(ctx context.Context, req *mimirpb.WriteRequest) (*mimirpb.WriteResponse, error) {
pushReq := NewParsedRequest(req)
pushReq.AddCleanup(func() {
req.FreeBuffer()
mimirpb.ReuseSlice(req.Timeseries)
})

10 changes: 10 additions & 0 deletions pkg/distributor/query.go
@@ -269,6 +269,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [
if len(resp.Timeseries) > 0 {
for _, series := range resp.Timeseries {
if limitErr := queryLimiter.AddSeries(series.Labels); limitErr != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, limitErr
}
}
@@ -277,20 +278,24 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [
} else if len(resp.Chunkseries) > 0 {
// Enforce the max chunks limits.
if err := queryLimiter.AddChunks(ingester_client.ChunksCount(resp.Chunkseries)); err != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, err
}

if err := queryLimiter.AddEstimatedChunks(ingester_client.ChunksCount(resp.Chunkseries)); err != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, err
}

for _, series := range resp.Chunkseries {
if err := queryLimiter.AddSeries(series.Labels); err != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, err
}
}

if err := queryLimiter.AddChunkBytes(ingester_client.ChunksSize(resp.Chunkseries)); err != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, err
}

@@ -301,15 +306,18 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [

for _, s := range resp.StreamingSeries {
if err := queryLimiter.AddSeries(s.Labels); err != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, err
}

// We enforce the chunk count limit here, but enforce the chunk bytes limit while streaming the chunks themselves.
if err := queryLimiter.AddChunks(int(s.ChunkCount)); err != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, err
}

if err := queryLimiter.AddEstimatedChunks(int(s.ChunkCount)); err != nil {
resp.FreeBuffer()
return ingesterQueryResult{}, err
}

@@ -319,6 +327,8 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [
streamingSeriesBatches = append(streamingSeriesBatches, labelsBatch)
}

resp.FreeBuffer()

if resp.IsEndOfSeriesStream {
if streamingSeriesCount > 0 {
result.streamingSeries.Series = make([]labels.Labels, 0, streamingSeriesCount)
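The resp.FreeBuffer() calls added above all follow one discipline: once the codec has parked the unmarshalling buffer on the response, every return path out of queryIngesterStream must release it, including the early returns taken when a query limiter rejects the response. A minimal sketch of that discipline, with hypothetical helper and type names that are not part of the PR:

package main

import "errors"

// bufferedResponse stands in for a message type that embeds mimirpb.BufferHolder.
type bufferedResponse interface {
	FreeBuffer()
}

// applyLimits runs each limiter check and releases the response's retained buffer
// before propagating the first error, so an early return never leaks the buffer.
// Whether the success path should also free immediately depends on whether the
// message's fields still alias the buffer afterwards.
func applyLimits(resp bufferedResponse, checks ...func() error) error {
	for _, check := range checks {
		if err := check(); err != nil {
			resp.FreeBuffer()
			return err
		}
	}
	return nil
}

type fakeResp struct{ freed bool }

func (f *fakeResp) FreeBuffer() { f.freed = true }

func main() {
	resp := &fakeResp{}
	err := applyLimits(resp,
		func() error { return nil },                                     // e.g. a series limit check
		func() error { return errors.New("max chunks limit reached") },  // e.g. a chunks limit check
	)
	if err != nil && !resp.freed {
		panic("buffer leaked on early return")
	}
}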
3 changes: 3 additions & 0 deletions pkg/frontend/querymiddleware/model.pb.go

Some generated files are not rendered by default.

14 changes: 14 additions & 0 deletions pkg/frontend/querymiddleware/model.pb.go.expdiff
@@ -0,0 +1,14 @@
diff --git a/pkg/frontend/querymiddleware/model.pb.go b/pkg/frontend/querymiddleware/model.pb.go
index 315ed4eed..47f80838c 100644
--- a/pkg/frontend/querymiddleware/model.pb.go
+++ b/pkg/frontend/querymiddleware/model.pb.go
@@ -83,9 +83,6 @@ func (m *PrometheusHeader) GetValues() []string {
}

type PrometheusResponse struct {
- // Keep reference to buffer for unsafe references.
- github_com_grafana_mimir_pkg_mimirpb.BufferHolder
-
Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"`
Data *PrometheusData `protobuf:"bytes,2,opt,name=Data,proto3" json:"data,omitempty"`
ErrorType string `protobuf:"bytes,3,opt,name=ErrorType,proto3" json:"errorType,omitempty"`
23 changes: 20 additions & 3 deletions pkg/ingester/client/buffering_client_test.go
@@ -12,6 +12,8 @@ import (
"time"

"github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
@@ -68,7 +70,12 @@ func TestWriteRequestBufferingClient_Push(t *testing.T) {
}

reqs := serv.requests()
require.Equal(t, requestsToSend, reqs)
diff := cmp.Diff(requestsToSend, reqs, cmp.Comparer(func(a, b *mimirpb.WriteRequest) bool {
return cmp.Equal(*a, *b, cmpopts.IgnoreUnexported(mimirpb.WriteRequest{}), cmpopts.IgnoreUnexported(mimirpb.BufferHolder{}), cmp.Comparer(func(a, b mimirpb.PreallocTimeseries) bool {
return a.TimeSeries.Equal(b.TimeSeries)
}))
}))
require.Empty(t, diff)
})

t.Run("push with pooling", func(t *testing.T) {
@@ -85,7 +92,12 @@ func TestWriteRequestBufferingClient_Push(t *testing.T) {
}

reqs := serv.requests()
require.Equal(t, requestsToSend, reqs)
diff := cmp.Diff(requestsToSend, reqs, cmp.Comparer(func(a, b *mimirpb.WriteRequest) bool {
return cmp.Equal(*a, *b, cmpopts.IgnoreUnexported(mimirpb.WriteRequest{}), cmpopts.IgnoreUnexported(mimirpb.BufferHolder{}), cmp.Comparer(func(a, b mimirpb.PreallocTimeseries) bool {
return a.TimeSeries.Equal(b.TimeSeries)
}))
}))
require.Empty(t, diff)

// Verify that pool was used.
require.Greater(t, pool.Gets.Load(), int64(0))
@@ -149,7 +161,12 @@ func TestWriteRequestBufferingClient_Push_WithMultipleMarshalCalls(t *testing.T)
_, err := bufferingClient.Push(ctx, req)
require.NoError(t, err)

require.Equal(t, serv.requests(), []*mimirpb.WriteRequest{req})
diff := cmp.Diff([]*mimirpb.WriteRequest{req}, serv.requests(), cmp.Comparer(func(a, b *mimirpb.WriteRequest) bool {
return cmp.Equal(*a, *b, cmpopts.IgnoreUnexported(mimirpb.WriteRequest{}), cmpopts.IgnoreUnexported(mimirpb.BufferHolder{}), cmp.Comparer(func(a, b mimirpb.PreallocTimeseries) bool {
return a.TimeSeries.Equal(b.TimeSeries)
}))
}))
require.Empty(t, diff)

// Verify that all buffers from the pool were returned.
require.Greater(t, pool.Gets.Load(), int64(0))
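Because WriteRequest now embeds BufferHolder, which carries an unexported mem.Buffer, a plain require.Equal on whole requests can spuriously fail, so the assertions switch to go-cmp with the unexported state ignored. The same comparer is spelled out at three call sites; a possible refactor (not part of the PR) would hoist it into a helper in the same test file, which already imports cmp, cmpopts, require and mimirpb:

// requireWriteRequestsEqual mirrors the comparer used above: it ignores unexported
// buffer state and compares each timeseries by value rather than by pointer.
func requireWriteRequestsEqual(t *testing.T, want, got []*mimirpb.WriteRequest) {
	t.Helper()
	diff := cmp.Diff(want, got, cmp.Comparer(func(a, b *mimirpb.WriteRequest) bool {
		return cmp.Equal(*a, *b,
			cmpopts.IgnoreUnexported(mimirpb.WriteRequest{}),
			cmpopts.IgnoreUnexported(mimirpb.BufferHolder{}),
			cmp.Comparer(func(a, b mimirpb.PreallocTimeseries) bool {
				return a.TimeSeries.Equal(b.TimeSeries)
			}))
	}))
	require.Empty(t, diff)
}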
15 changes: 15 additions & 0 deletions pkg/ingester/client/ingester.pb.go

Some generated files are not rendered by default.

54 changes: 54 additions & 0 deletions pkg/ingester/client/ingester.pb.go.expdiff
@@ -0,0 +1,54 @@
diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go
index 9398a5d80..bbefc14b1 100644
--- a/pkg/ingester/client/ingester.pb.go
+++ b/pkg/ingester/client/ingester.pb.go
@@ -582,9 +582,6 @@ func (m *ActiveSeriesRequest) GetType() ActiveSeriesRequest_RequestType {
}

type QueryResponse struct {
- // Keep reference to buffer for unsafe references.
- mimirpb.BufferHolder
-
Timeseries []mimirpb.TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
}

@@ -636,9 +633,6 @@ func (m *QueryResponse) GetTimeseries() []mimirpb.TimeSeries {
//
// Only one of these two options will be populated.
type QueryStreamResponse struct {
- // Keep reference to buffer for unsafe references.
- mimirpb.BufferHolder
-
Chunkseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=chunkseries,proto3" json:"chunkseries"`
Timeseries []mimirpb.TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries"`
StreamingSeries []QueryStreamSeries `protobuf:"bytes,3,rep,name=streaming_series,json=streamingSeries,proto3" json:"streaming_series"`
@@ -809,9 +803,6 @@ func (m *QueryStreamSeriesChunks) GetChunks() []Chunk {
}

type ExemplarQueryResponse struct {
- // Keep reference to buffer for unsafe references.
- mimirpb.BufferHolder
-
Timeseries []mimirpb.TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
}

@@ -1330,9 +1321,6 @@ func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers {
}

type MetricsForLabelMatchersResponse struct {
- // Keep reference to buffer for unsafe references.
- mimirpb.BufferHolder
-
Metric []*mimirpb.Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"`
}

@@ -1478,9 +1466,6 @@ func (m *MetricsMetadataResponse) GetMetadata() []*mimirpb.MetricMetadata {
}

type ActiveSeriesResponse struct {
- // Keep reference to buffer for unsafe references.
- mimirpb.BufferHolder
-
Metric []*mimirpb.Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"`
// bucket_count is only used when the request type was NATIVE_HISTOGRAM_SERIES.
// bucket_count contains the native histogram active buckets count for each series in "metric" above.
5 changes: 4 additions & 1 deletion pkg/ingester/ingester.go
@@ -3855,7 +3855,10 @@ func (i *Ingester) checkAvailableForPush() error {

// PushToStorage implements ingest.Pusher interface for ingestion via ingest-storage.
func (i *Ingester) PushToStorage(ctx context.Context, req *mimirpb.WriteRequest) error {
err := i.PushWithCleanup(ctx, req, func() { mimirpb.ReuseSlice(req.Timeseries) })
err := i.PushWithCleanup(ctx, req, func() {
req.FreeBuffer()
mimirpb.ReuseSlice(req.Timeseries)
})
if err != nil {
return mapPushErrorToErrorWithStatus(err)
}
8 changes: 5 additions & 3 deletions pkg/ingester/ingester_test.go
@@ -3278,8 +3278,10 @@ func TestIngester_Push(t *testing.T) {

// Push timeseries
for idx, req := range testData.reqs {
// Push metrics to the ingester. Override the default cleanup method of mimirpb.ReuseSlice with a no-op one.
err := i.PushWithCleanup(ctx, req, func() {})
// Push metrics to the ingester.
err := i.PushWithCleanup(ctx, req, func() {
req.FreeBuffer()
})

// We expect no error on any request except the last one
// which may error (and in that case we assert on it)
@@ -5516,7 +5518,7 @@ func TestIngester_QueryStream_StreamingWithManySamples(t *testing.T) {
IsEndOfSeriesStream: true,
}

require.Equal(t, seriesLabelsMsg, *resp)
require.EqualExportedValues(t, seriesLabelsMsg, *resp)

recvMsgs := 0
series := 0
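require.EqualExportedValues compares only exported fields, so the assertion above is unaffected by the unexported mem.Buffer now carried inside the embedded BufferHolder, whereas require.Equal would compare it too. A tiny self-contained illustration with hypothetical types (not taken from the PR):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type holder struct{ buffer []byte } // stand-in for an unexported retained buffer

type response struct {
	holder
	Status string
}

func TestExportedValuesOnly(t *testing.T) {
	a := response{holder: holder{buffer: []byte("x")}, Status: "success"}
	b := response{Status: "success"}

	// Passes: the unexported embedded holder (and its buffer) is ignored.
	require.EqualExportedValues(t, a, b)

	// require.Equal(t, a, b) would fail because the buffers differ.
}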
76 changes: 76 additions & 0 deletions pkg/mimirpb/custom.go
@@ -8,8 +8,84 @@ import (
"math"

"github.com/prometheus/prometheus/model/histogram"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/mem"
protobufproto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/protoadapt"
)

func init() {
c := encoding.GetCodecV2(proto.Name)
encoding.RegisterCodecV2(&codecV2{codec: c})
}

// codecV2 customizes gRPC unmarshalling.
type codecV2 struct {
codec encoding.CodecV2
}

var _ encoding.CodecV2 = &codecV2{}

func messageV2Of(v any) protobufproto.Message {
switch v := v.(type) {
case protoadapt.MessageV1:
return protoadapt.MessageV2Of(v)
case protoadapt.MessageV2:
return v
default:
panic(fmt.Errorf("unrecognized message type %T", v))
}
}

func (c *codecV2) Marshal(v any) (mem.BufferSlice, error) {
return c.codec.Marshal(v)
}

// Unmarshal customizes gRPC unmarshalling.
// If v wraps BufferHolder, its SetBuffer method is called with the unmarshalling buffer.
func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) error {
vv := messageV2Of(v)
buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
// Decrement buf's reference count. Note though that if v wraps BufferHolder,
// we increase buf's reference count first so it doesn't go to zero.
defer buf.Free()

if err := protobufproto.Unmarshal(buf.ReadOnlyData(), vv); err != nil {
return err
}

if holder, ok := v.(interface {
SetBuffer(mem.Buffer)
}); ok {
buf.Ref()
holder.SetBuffer(buf)
}

return nil
}

func (c *codecV2) Name() string {
return c.codec.Name()
}

// BufferHolder is a base type for protobuf messages that keep unsafe references to the unmarshalling buffer.
// Messages embedding this type retain a reference to said buffer until FreeBuffer is called.
type BufferHolder struct {
buffer mem.Buffer
}

func (m *BufferHolder) SetBuffer(buf mem.Buffer) {
m.buffer = buf
}

func (m *BufferHolder) FreeBuffer() {
if m.buffer != nil {
m.buffer.Free()
m.buffer = nil
}
}

// MinTimestamp returns the minimum timestamp (milliseconds) among all series
// in the WriteRequest. Returns math.MaxInt64 if the request is empty.
func (m *WriteRequest) MinTimestamp() int64 {
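Taken together, the custom codec and BufferHolder implement a small ownership handoff: the codec materializes the incoming bytes into a single mem.Buffer, takes an extra reference, and parks the buffer on any message that embeds BufferHolder; the consumer calls FreeBuffer once nothing references the buffer any more. A standalone sketch of that buffer lifetime, assuming grpc's mem.Copy and mem.DefaultBufferPool helpers and a local holder type that mirrors (but is not) mimirpb.BufferHolder:

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

// holder keeps the unmarshalling buffer alive while the decoded message still
// holds unsafe references into it, mirroring mimirpb.BufferHolder.
type holder struct{ buffer mem.Buffer }

func (h *holder) SetBuffer(buf mem.Buffer) { h.buffer = buf }

func (h *holder) FreeBuffer() {
	if h.buffer != nil {
		h.buffer.Free()
		h.buffer = nil
	}
}

func main() {
	pool := mem.DefaultBufferPool()

	// Stand-in for the buffer the codec materializes from the wire data.
	buf := mem.Copy([]byte("serialized message bytes"), pool)

	var h holder
	buf.Ref()        // codec: keep the buffer alive past its own deferred Free
	h.SetBuffer(buf)
	buf.Free()       // codec: the deferred Free; the holder's reference keeps the data valid

	fmt.Println(len(h.buffer.ReadOnlyData())) // message fields may alias this data

	h.FreeBuffer() // caller: release the buffer once done with the message
}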