diff --git a/metrics_proto.go b/metrics_proto.go
index a48c4c8..0969321 100644
--- a/metrics_proto.go
+++ b/metrics_proto.go
@@ -45,6 +45,10 @@ import (
 
 var errNilMetric = errors.New("expecting a non-nil metric")
 var errNilMetricDescriptor = errors.New("expecting a non-nil metric descriptor")
+var percentileLabelKey = &metricspb.LabelKey{
+	Key:         "percentile",
+	Description: "the value at a given percentile of a distribution",
+}
 
 type metricProtoPayload struct {
 	node             *commonpb.Node
@@ -53,6 +57,18 @@ type metricProtoPayload struct {
 	additionalLabels map[string]labelValue
 }
 
+// addPayload wraps each metric in a metricProtoPayload and adds it to the proto metrics bundler.
+func (se *statsExporter) addPayload(node *commonpb.Node, rsc *resourcepb.Resource, labels map[string]labelValue, metrics ...*metricspb.Metric) {
+	for _, metric := range metrics {
+		payload := &metricProtoPayload{
+			metric:           metric,
+			resource:         rsc,
+			node:             node,
+			additionalLabels: labels,
+		}
+		se.protoMetricsBundler.Add(payload, 1)
+	}
+}
+
 // ExportMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring.
 func (se *statsExporter) ExportMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) error {
 	if len(metrics) == 0 {
@@ -66,18 +82,128 @@ func (se *statsExporter) ExportMetricsProto(ctx context.Context, node *commonpb.
 	}
 
 	for _, metric := range metrics {
-		payload := &metricProtoPayload{
-			metric:           metric,
-			resource:         rsc,
-			node:             node,
-			additionalLabels: additionalLabels,
+		if metric.GetMetricDescriptor().GetType() == metricspb.MetricDescriptor_SUMMARY {
+			se.addPayload(node, rsc, additionalLabels, se.convertSummaryMetrics(metric)...)
+		} else {
+			se.addPayload(node, rsc, additionalLabels, metric)
 		}
-		se.protoMetricsBundler.Add(payload, 1)
 	}
 	return nil
 }
 
+// convertSummaryMetrics converts a SUMMARY metric into cumulative sum and count
+// metrics plus a gauge metric keyed by percentile, which Stackdriver can ingest.
+func (se *statsExporter) convertSummaryMetrics(summary *metricspb.Metric) []*metricspb.Metric {
+	var metrics []*metricspb.Metric
+	var percentileTss []*metricspb.TimeSeries
+	var countTss []*metricspb.TimeSeries
+	var sumTss []*metricspb.TimeSeries
+
+	for _, ts := range summary.Timeseries {
+		lvs := ts.GetLabelValues()
+
+		startTime := ts.StartTimestamp
+		for _, pt := range ts.GetPoints() {
+			ptTimestamp := pt.GetTimestamp()
+			summaryValue := pt.GetSummaryValue()
+			if summaryValue.Sum != nil {
+				sumTs := &metricspb.TimeSeries{
+					LabelValues:    lvs,
+					StartTimestamp: startTime,
+					Points: []*metricspb.Point{
+						{
+							Value: &metricspb.Point_DoubleValue{
+								DoubleValue: summaryValue.Sum.Value,
+							},
+							Timestamp: ptTimestamp,
+						},
+					},
+				}
+				sumTss = append(sumTss, sumTs)
+			}
+
+			if summaryValue.Count != nil {
+				countTs := &metricspb.TimeSeries{
+					LabelValues:    lvs,
+					StartTimestamp: startTime,
+					Points: []*metricspb.Point{
+						{
+							Value: &metricspb.Point_Int64Value{
+								Int64Value: summaryValue.Count.Value,
+							},
+							Timestamp: ptTimestamp,
+						},
+					},
+				}
+				countTss = append(countTss, countTs)
+			}
+
+			snapshot := summaryValue.GetSnapshot()
+			for _, percentileValue := range snapshot.GetPercentileValues() {
+				// Full slice expression forces append to copy, so the percentile label never overwrites lvs' shared backing array.
+				lvsWithPercentile := lvs[0:len(lvs):len(lvs)]
+				lvsWithPercentile = append(lvsWithPercentile, &metricspb.LabelValue{
+					Value: fmt.Sprintf("%f", percentileValue.Percentile),
+				})
+				percentileTs := &metricspb.TimeSeries{
+					LabelValues:    lvsWithPercentile,
+					StartTimestamp: nil,
+					Points: []*metricspb.Point{
+						{
+							Value: &metricspb.Point_DoubleValue{
+								DoubleValue: percentileValue.Value,
+							},
+							Timestamp: ptTimestamp,
+						},
+					},
+				}
+				percentileTss = append(percentileTss, percentileTs)
+			}
+		}
+	}
+
+	if len(sumTss) > 0 {
+		metric := &metricspb.Metric{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        fmt.Sprintf("%s_summary_sum", summary.GetMetricDescriptor().GetName()),
+				Description: summary.GetMetricDescriptor().GetDescription(),
+				Type:        metricspb.MetricDescriptor_CUMULATIVE_DOUBLE,
+				Unit:        summary.GetMetricDescriptor().GetUnit(),
+				LabelKeys:   summary.GetMetricDescriptor().GetLabelKeys(),
+			},
+			Timeseries: sumTss,
+		}
+		metrics = append(metrics, metric)
+	}
+	if len(countTss) > 0 {
+		metric := &metricspb.Metric{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        fmt.Sprintf("%s_summary_count", summary.GetMetricDescriptor().GetName()),
+				Description: summary.GetMetricDescriptor().GetDescription(),
+				Type:        metricspb.MetricDescriptor_CUMULATIVE_INT64,
+				Unit:        "1",
+				LabelKeys:   summary.GetMetricDescriptor().GetLabelKeys(),
+			},
+			Timeseries: countTss,
+		}
+		metrics = append(metrics, metric)
+	}
+	if len(percentileTss) > 0 {
+		lks := summary.GetMetricDescriptor().GetLabelKeys()[0:]
+		lks = append(lks, percentileLabelKey)
+		metric := &metricspb.Metric{
+			MetricDescriptor: &metricspb.MetricDescriptor{
+				Name:        fmt.Sprintf("%s_summary_percentile", summary.GetMetricDescriptor().GetName()),
+				Description: summary.GetMetricDescriptor().GetDescription(),
+				Type:        metricspb.MetricDescriptor_GAUGE_DOUBLE,
+				Unit:        summary.GetMetricDescriptor().GetUnit(),
+				LabelKeys:   lks,
+			},
+			Timeseries: percentileTss,
+		}
+		metrics = append(metrics, metric)
+	}
+	return metrics
+}
+
 func (se *statsExporter) handleMetricsProtoUpload(payloads []*metricProtoPayload) error {
 	ctx, cancel := se.o.newContextWithTimeout()
 	defer cancel()
diff --git a/metrics_proto_test.go b/metrics_proto_test.go
index 4267b09..d392acd 100644
--- a/metrics_proto_test.go
+++ b/metrics_proto_test.go
@@ -20,7 +20,7 @@ import (
 	"strings"
 	"testing"
 
-	monitoring "cloud.google.com/go/monitoring/apiv3"
+	"cloud.google.com/go/monitoring/apiv3"
 	"github.com/golang/protobuf/ptypes/timestamp"
 	distributionpb "google.golang.org/genproto/googleapis/api/distribution"
 	labelpb "google.golang.org/genproto/googleapis/api/label"
@@ -31,6 +31,8 @@ import (
 	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
 	metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
 	resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+	"github.com/golang/protobuf/ptypes/wrappers"
+	"github.com/google/go-cmp/cmp"
 )
 
 func TestProtoResourceToMonitoringResource(t *testing.T) {
@@ -515,3 +517,169 @@ func TestNodeToDefaultLabels(t *testing.T) {
 		}
 	}
 }
+
+func TestConvertSummaryMetrics(t *testing.T) {
+	startTimestamp := &timestamp.Timestamp{
+		Seconds: 1543160298,
+		Nanos:   100000090,
+	}
+	endTimestamp := &timestamp.Timestamp{
+		Seconds: 1543160298,
+		Nanos:   100000997,
+	}
+
+	tests := []struct {
+		in            *metricspb.Metric
+		want          []*metricspb.Metric
+		statsExporter *statsExporter
+	}{
+		{
+			in: &metricspb.Metric{
+				MetricDescriptor: &metricspb.MetricDescriptor{
+					Name:        "summary_metric_descriptor",
+					Description: "This is a test",
+					Unit:        "ms",
+					Type:        metricspb.MetricDescriptor_SUMMARY,
+				},
+				Timeseries: []*metricspb.TimeSeries{
+					{
+						StartTimestamp: startTimestamp,
+						Points: []*metricspb.Point{
+							{
+								Timestamp: endTimestamp,
+								Value: &metricspb.Point_SummaryValue{
+									SummaryValue: &metricspb.SummaryValue{
+										Count: &wrappers.Int64Value{Value: 10},
+										Sum:   &wrappers.DoubleValue{Value: 119.0},
+										Snapshot: &metricspb.SummaryValue_Snapshot{
+											PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{
+												makePercentileValue(5.6, 10.0),
+												makePercentileValue(9.6, 50.0),
+												makePercentileValue(12.6, 90.0),
+												makePercentileValue(19.6, 99.0),
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			statsExporter: &statsExporter{
+				o: Options{ProjectID: "foo"},
+			},
+			want: []*metricspb.Metric{
+				{
+					MetricDescriptor: &metricspb.MetricDescriptor{
+						Name:        "summary_metric_descriptor_summary_sum",
+						Description: "This is a test",
+						Unit:        "ms",
+						Type:        metricspb.MetricDescriptor_CUMULATIVE_DOUBLE,
+					},
+					Timeseries: []*metricspb.TimeSeries{
+						makeDoubleTs(119.0, "", startTimestamp, endTimestamp),
+					},
+				},
+				{
+					MetricDescriptor: &metricspb.MetricDescriptor{
+						Name:        "summary_metric_descriptor_summary_count",
+						Description: "This is a test",
+						Unit:        "1",
+						Type:        metricspb.MetricDescriptor_CUMULATIVE_INT64,
+					},
+					Timeseries: []*metricspb.TimeSeries{
+						makeInt64Ts(10, "", startTimestamp, endTimestamp),
+					},
+				},
+				{
+					MetricDescriptor: &metricspb.MetricDescriptor{
+						Name:        "summary_metric_descriptor_summary_percentile",
+						Description: "This is a test",
+						Unit:        "ms",
+						Type:        metricspb.MetricDescriptor_GAUGE_DOUBLE,
+						LabelKeys: []*metricspb.LabelKey{
+							percentileLabelKey,
+						},
+					},
+					Timeseries: []*metricspb.TimeSeries{
+						makeDoubleTs(5.6, "10.000000", nil, endTimestamp),
+						makeDoubleTs(9.6, "50.000000", nil, endTimestamp),
+						makeDoubleTs(12.6, "90.000000", nil, endTimestamp),
+						makeDoubleTs(19.6, "99.000000", nil, endTimestamp),
+					},
+				},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		se := tt.statsExporter
+		if se == nil {
+			se = new(statsExporter)
+		}
+		got := se.convertSummaryMetrics(tt.in)
+		if !cmp.Equal(got, tt.want) {
+			t.Fatalf("conversion failed:\n got=%v\n want=%v\n", got, tt.want)
+		}
+	}
+}
+
+func makeInt64Ts(val int64, label string, start, end *timestamp.Timestamp) *metricspb.TimeSeries {
+	ts := &metricspb.TimeSeries{
+		StartTimestamp: start,
+		Points:         makeInt64Point(val, end),
+	}
+	if label != "" {
+		ts.LabelValues = makeLabelValue(label)
+	}
+	return ts
+}
+
+func makeInt64Point(val int64, end *timestamp.Timestamp) []*metricspb.Point {
+	return []*metricspb.Point{
+		{
+			Timestamp: end,
+			Value: &metricspb.Point_Int64Value{
+				Int64Value: val,
+			},
+		},
+	}
+}
+
+func makeDoubleTs(val float64, label string, start, end *timestamp.Timestamp) *metricspb.TimeSeries {
+	ts := &metricspb.TimeSeries{
+		StartTimestamp: start,
+		Points:         makeDoublePoint(val, end),
+	}
+	if label != "" {
+		ts.LabelValues = makeLabelValue(label)
+	}
+	return ts
+}
+
+func makeDoublePoint(val float64, end *timestamp.Timestamp) []*metricspb.Point {
+	return []*metricspb.Point{
+		{
+			Timestamp: end,
+			Value: &metricspb.Point_DoubleValue{
+				DoubleValue: val,
+			},
+		},
+	}
+}
+
+func makeLabelValue(value string) []*metricspb.LabelValue {
+	return []*metricspb.LabelValue{
+		{
+			Value: value,
+		},
+	}
+}
+
+func makePercentileValue(val, percentile float64) *metricspb.SummaryValue_Snapshot_ValueAtPercentile {
+	return &metricspb.SummaryValue_Snapshot_ValueAtPercentile{
+		Value:      val,
+		Percentile: percentile,
+	}
+}
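
For context, below is a minimal sketch of how a caller might exercise the new summary path end to end, mirroring the input shape used in TestConvertSummaryMetrics. It assumes the package's import path is `contrib.go.opencensus.io/exporter/stackdriver` and that the public `Exporter` type exposes an `ExportMetricsProto` wrapper around the method touched by this patch; the project ID, metric name, and values are placeholders.

```go
package main

import (
	"context"
	"log"

	"contrib.go.opencensus.io/exporter/stackdriver"
	metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	exporter, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: "your-project-id"})
	if err != nil {
		log.Fatalf("failed to create exporter: %v", err)
	}

	now := ptypes.TimestampNow()

	// A SUMMARY metric: a single point carrying a count, a sum and a percentile snapshot.
	summary := &metricspb.Metric{
		MetricDescriptor: &metricspb.MetricDescriptor{
			Name: "request_latency",
			Unit: "ms",
			Type: metricspb.MetricDescriptor_SUMMARY,
		},
		Timeseries: []*metricspb.TimeSeries{{
			StartTimestamp: now,
			Points: []*metricspb.Point{{
				Timestamp: now,
				Value: &metricspb.Point_SummaryValue{
					SummaryValue: &metricspb.SummaryValue{
						Count: &wrappers.Int64Value{Value: 10},
						Sum:   &wrappers.DoubleValue{Value: 119.0},
						Snapshot: &metricspb.SummaryValue_Snapshot{
							PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{
								{Percentile: 99.0, Value: 19.6},
							},
						},
					},
				},
			}},
		}},
	}

	// The summary is split into *_summary_sum, *_summary_count and
	// *_summary_percentile metrics before being bundled and uploaded.
	if err := exporter.ExportMetricsProto(context.Background(), nil, nil, []*metricspb.Metric{summary}); err != nil {
		log.Fatalf("export failed: %v", err)
	}
	exporter.Flush()
}
```

With this input the exporter would upload `request_latency_summary_sum`, `request_latency_summary_count`, and `request_latency_summary_percentile` time series instead of the raw summary, the last of them labeled with the `percentile` key added by this patch.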