From 4c540e0b5d8781ce20dc6d7acb9926c61df7daec Mon Sep 17 00:00:00 2001
From: Ziqi Zhao
Date: Mon, 6 Nov 2023 16:54:21 +0800
Subject: [PATCH] otelgrpc: Add metrics support to NewServerHandler and
 NewClientHandler (#4356)

---
 CHANGELOG.md                                  |   1 +
 .../google.golang.org/grpc/otelgrpc/config.go |  48 +-
 .../grpc/otelgrpc/interceptor.go              |  10 +-
 .../grpc/otelgrpc/metadata_supplier.go        |   4 +-
 .../grpc/otelgrpc/stats_handler.go            | 116 ++-
 .../otelgrpc/test/grpc_stats_handler_test.go  | 729 +++++++++++++++++-
 .../grpc/otelgrpc/test/grpc_test.go           |   4 +-
 .../grpc/otelgrpc/test/interceptor_test.go    |   4 +-
 8 files changed, 863 insertions(+), 53 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bdc319aaf64..57306076fce 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 - Add metrics support (No-op, OTLP and Prometheus) to `go.opentelemetry.io/contrib/exporters/autoexport`. (#4229, #4479)
 - Add support for `console` span exporter and metrics exporter in `go.opentelemetry.io/contrib/exporters/autoexport`. (#4486)
 - Set unit and description on all instruments in `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp`. (#4500)
+- Add metric support for `grpc.StatsHandler` in `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc`. (#4356)
 
 ### Changed

diff --git a/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/instrumentation/google.golang.org/grpc/otelgrpc/config.go
index d9b91a24b17..bad05b6734f 100644
--- a/instrumentation/google.golang.org/grpc/otelgrpc/config.go
+++ b/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -46,8 +46,14 @@ type config struct {
 	ReceivedEvent bool
 	SentEvent     bool
 
-	meter             metric.Meter
-	rpcServerDuration metric.Int64Histogram
+	tracer trace.Tracer
+	meter  metric.Meter
+
+	rpcDuration        metric.Float64Histogram
+	rpcRequestSize     metric.Int64Histogram
+	rpcResponseSize    metric.Int64Histogram
+	rpcRequestsPerRPC  metric.Int64Histogram
+	rpcResponsesPerRPC metric.Int64Histogram
 }
 
 // Option applies an option value for a config.
@@ -56,7 +62,7 @@ type Option interface {
 }
 
 // newConfig returns a config configured with all the passed Options.
-func newConfig(opts []Option) *config {
+func newConfig(opts []Option, role string) *config {
 	c := &config{
 		Propagators:    otel.GetTextMapPropagator(),
 		TracerProvider: otel.GetTracerProvider(),
@@ -66,19 +72,53 @@ func newConfig(opts []Option) *config {
 		o.apply(c)
 	}
 
+	c.tracer = c.TracerProvider.Tracer(
+		instrumentationName,
+		trace.WithInstrumentationVersion(SemVersion()),
+	)
+
 	c.meter = c.MeterProvider.Meter(
 		instrumentationName,
 		metric.WithInstrumentationVersion(Version()),
 		metric.WithSchemaURL(semconv.SchemaURL),
 	)
+
 	var err error
-	c.rpcServerDuration, err = c.meter.Int64Histogram("rpc.server.duration",
+	c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
 		metric.WithDescription("Measures the duration of inbound RPC."),
 		metric.WithUnit("ms"))
 	if err != nil {
 		otel.Handle(err)
 	}
 
+	c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
+		metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
+		metric.WithUnit("By"))
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
+		metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
+		metric.WithUnit("By"))
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
+		metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+		metric.WithUnit("{count}"))
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
+		metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+		metric.WithUnit("{count}"))
+	if err != nil {
+		otel.Handle(err)
+	}
+
 	return c
 }

diff --git a/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
index 815eabb907e..031d1f4df68 100644
--- a/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
+++ b/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -61,7 +61,7 @@ var (
 // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
 // for use in a grpc.Dial call.
 func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
-	cfg := newConfig(opts)
+	cfg := newConfig(opts, "client")
 	tracer := cfg.TracerProvider.Tracer(
 		instrumentationName,
 		trace.WithInstrumentationVersion(Version()),
@@ -255,7 +255,7 @@ func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) {
 // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
 // for use in a grpc.Dial call.
 func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
-	cfg := newConfig(opts)
+	cfg := newConfig(opts, "client")
 	tracer := cfg.TracerProvider.Tracer(
 		instrumentationName,
 		trace.WithInstrumentationVersion(Version()),
@@ -325,7 +325,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
 // UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
 // for use in a grpc.NewServer call.
 func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
-	cfg := newConfig(opts)
+	cfg := newConfig(opts, "server")
 	tracer := cfg.TracerProvider.Tracer(
 		instrumentationName,
 		trace.WithInstrumentationVersion(Version()),
@@ -387,7 +387,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
 		elapsedTime := time.Since(before).Milliseconds()
 
 		attr = append(attr, grpcStatusCodeAttr)
-		cfg.rpcServerDuration.Record(ctx, elapsedTime, metric.WithAttributes(attr...))
+		cfg.rpcDuration.Record(ctx, float64(elapsedTime), metric.WithAttributes(attr...))
 
 		return resp, err
 	}
@@ -446,7 +446,7 @@ func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *s
 // StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
 // for use in a grpc.NewServer call.
 func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
-	cfg := newConfig(opts)
+	cfg := newConfig(opts, "server")
 	tracer := cfg.TracerProvider.Tracer(
 		instrumentationName,
 		trace.WithInstrumentationVersion(Version()),

diff --git a/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
index d91c6df2370..f585fb6ae0c 100644
--- a/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
+++ b/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -56,7 +56,7 @@ func (s *metadataSupplier) Keys() []string {
 // requests.
 // Deprecated: Unnecessary public func.
 func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
-	c := newConfig(opts)
+	c := newConfig(opts, "")
 	c.Propagators.Inject(ctx, &metadataSupplier{
 		metadata: md,
 	})
@@ -78,7 +78,7 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont
 // This function is meant to be used on incoming requests.
 // Deprecated: Unnecessary public func.
 func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
-	c := newConfig(opts)
+	c := newConfig(opts, "")
 	ctx = c.Propagators.Extract(ctx, &metadataSupplier{
 		metadata: md,
 	})

diff --git a/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
index c64a53443bc..212e257ff72 100644
--- a/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
+++ b/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -17,13 +17,16 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
 import (
 	"context"
 	"sync/atomic"
+	"time"
 
 	grpc_codes "google.golang.org/grpc/codes"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/metric"
 	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
 	"go.opentelemetry.io/otel/trace"
 )
@@ -33,24 +36,32 @@ type gRPCContextKey struct{}
 type gRPCContext struct {
 	messagesReceived int64
 	messagesSent     int64
+	metricAttrs      []attribute.KeyValue
+}
+
+type serverHandler struct {
+	*config
 }
 
 // NewServerHandler creates a stats.Handler for gRPC server.
 func NewServerHandler(opts ...Option) stats.Handler {
 	h := &serverHandler{
-		config: newConfig(opts),
+		config: newConfig(opts, "server"),
 	}
-	h.tracer = h.config.TracerProvider.Tracer(
-		instrumentationName,
-		trace.WithInstrumentationVersion(SemVersion()),
-	)
 
 	return h
 }
 
-type serverHandler struct {
-	*config
-	tracer trace.Tracer
+// TagConn can attach some information to the given context.
+func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+	span := trace.SpanFromContext(ctx)
+	attrs := peerAttr(peerFromCtx(ctx))
+	span.SetAttributes(attrs...)
+	return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
 }
 
 // TagRPC can attach some information to the given context.
@@ -66,46 +77,30 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
 		trace.WithAttributes(attrs...),
 	)
 
-	gctx := gRPCContext{}
+	gctx := gRPCContext{
+		metricAttrs: attrs,
+	}
 
 	return context.WithValue(ctx, gRPCContextKey{}, &gctx)
 }
 
 // HandleRPC processes the RPC stats.
 func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
-	handleRPC(ctx, rs)
-}
-
-// TagConn can attach some information to the given context.
-func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
-	span := trace.SpanFromContext(ctx)
-	attrs := peerAttr(peerFromCtx(ctx))
-	span.SetAttributes(attrs...)
-	return ctx
+	h.handleRPC(ctx, rs)
 }
 
-// HandleConn processes the Conn stats.
-func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
+type clientHandler struct {
+	*config
 }
 
 // NewClientHandler creates a stats.Handler for gRPC client.
 func NewClientHandler(opts ...Option) stats.Handler {
 	h := &clientHandler{
-		config: newConfig(opts),
+		config: newConfig(opts, "client"),
 	}
-
-	h.tracer = h.config.TracerProvider.Tracer(
-		instrumentationName,
-		trace.WithInstrumentationVersion(SemVersion()),
-	)
-
 	return h
 }
 
-type clientHandler struct {
-	*config
-	tracer trace.Tracer
-}
-
 // TagRPC can attach some information to the given context.
 func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
 	name, attrs := internal.ParseFullMethod(info.FullMethodName)
@@ -117,14 +112,16 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
 		trace.WithAttributes(attrs...),
 	)
 
-	gctx := gRPCContext{}
+	gctx := gRPCContext{
+		metricAttrs: attrs,
+	}
 
 	return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
 }
 
 // HandleRPC processes the RPC stats.
 func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
-	handleRPC(ctx, rs)
+	h.handleRPC(ctx, rs)
 }
 
 // TagConn can attach some information to the given context.
@@ -140,17 +137,22 @@ func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
 	// no-op
 }
 
-func handleRPC(ctx context.Context, rs stats.RPCStats) {
+func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) {
 	span := trace.SpanFromContext(ctx)
 	gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
 	var messageId int64
 
+	metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
+	metricAttrs = append(metricAttrs, gctx.metricAttrs...)
+	wctx := withoutCancel(ctx)
 	switch rs := rs.(type) {
 	case *stats.Begin:
 	case *stats.InPayload:
 		if gctx != nil {
 			messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
+			c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
 		}
+
 		span.AddEvent("message",
 			trace.WithAttributes(
 				semconv.MessageTypeReceived,
@@ -162,6 +164,7 @@ func handleRPC(ctx context.Context, rs stats.RPCStats) {
 	case *stats.OutPayload:
 		if gctx != nil {
 			messageId = atomic.AddInt64(&gctx.messagesSent, 1)
+			c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
 		}
 
 		span.AddEvent("message",
@@ -172,16 +175,57 @@ func handleRPC(ctx context.Context, rs stats.RPCStats) {
 				semconv.MessageIDKey.Int64(messageId),
 				semconv.MessageUncompressedSizeKey.Int(rs.Length),
 			),
 		)
+	case *stats.OutTrailer:
 	case *stats.End:
+		var rpcStatusAttr attribute.KeyValue
+
 		if rs.Error != nil {
 			s, _ := status.FromError(rs.Error)
 			span.SetStatus(codes.Error, s.Message())
-			span.SetAttributes(statusCodeAttr(s.Code()))
+			rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
 		} else {
-			span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+			rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
 		}
+		span.SetAttributes(rpcStatusAttr)
 		span.End()
+
+		metricAttrs = append(metricAttrs, rpcStatusAttr)
+		c.rpcDuration.Record(wctx, float64(rs.EndTime.Sub(rs.BeginTime)), metric.WithAttributes(metricAttrs...))
+		c.rpcRequestsPerRPC.Record(wctx, gctx.messagesReceived, metric.WithAttributes(metricAttrs...))
+		c.rpcResponsesPerRPC.Record(wctx, gctx.messagesSent, metric.WithAttributes(metricAttrs...))
+
 	default:
 		return
 	}
 }
+
+func withoutCancel(parent context.Context) context.Context {
+	if parent == nil {
+		panic("cannot create context from nil parent")
+	}
+	return withoutCancelCtx{parent}
+}
+
+type withoutCancelCtx struct {
+	c context.Context
+}
+
+func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (withoutCancelCtx) Done() <-chan struct{} {
+	return nil
+}
+
+func (withoutCancelCtx) Err() error {
+	return nil
+}
+
+func (w withoutCancelCtx) Value(key any) any {
+	return w.c.Value(key)
+}
+
+func (w withoutCancelCtx) String() string {
+	return "withoutCancel"
+}

diff --git a/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_stats_handler_test.go b/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_stats_handler_test.go
index b6afd1a118d..eccb63b4082 100644
--- a/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_stats_handler_test.go
+++ b/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_stats_handler_test.go
@@ -15,6 +15,7 @@ package test
 
 import (
+	"context"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -24,6 +25,11 @@ import (
 
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
+	"go.opentelemetry.io/otel/sdk/trace"
 	"go.opentelemetry.io/otel/sdk/trace/tracetest"
 	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
 )
 
 func TestStatsHandler(t *testing.T) {
 	clientSR := tracetest.NewSpanRecorder()
 	clientTP := trace.NewTracerProvider(trace.WithSpanProcessor(clientSR))
+	clientMetricReader := metric.NewManualReader()
+	clientMP := metric.NewMeterProvider(metric.WithReader(clientMetricReader))
 
 	serverSR := tracetest.NewSpanRecorder()
 	serverTP := trace.NewTracerProvider(trace.WithSpanProcessor(serverSR))
+	serverMetricReader := metric.NewManualReader()
+	serverMP := metric.NewMeterProvider(metric.WithReader(serverMetricReader))
 
 	assert.NoError(t, doCalls(
 		[]grpc.DialOption{
-			grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(clientTP))),
+			grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(clientTP), otelgrpc.WithMeterProvider(clientMP))),
 		},
 		[]grpc.ServerOption{
-			grpc.StatsHandler(otelgrpc.NewServerHandler(otelgrpc.WithTracerProvider(serverTP))),
+			grpc.StatsHandler(otelgrpc.NewServerHandler(otelgrpc.WithTracerProvider(serverTP), otelgrpc.WithMeterProvider(serverMP))),
 		},
 	))
@@ -49,9 +59,17 @@ func TestStatsHandler(t *testing.T) {
 		checkClientSpans(t, clientSR.Ended())
 	})
 
+	t.Run("ClientMetrics", func(t *testing.T) {
+		checkClientMetrics(t, clientMetricReader)
+	})
+
 	t.Run("ServerSpans", func(t *testing.T) {
 		checkServerSpans(t, serverSR.Ended())
 	})
+
+	t.Run("ServerMetrics", func(t *testing.T) {
+		checkServerMetrics(t, serverMetricReader)
+	})
 }
 
 func checkClientSpans(t *testing.T, spans []trace.ReadOnlySpan) {
@@ -579,3 +597,710 @@ func checkServerSpans(t *testing.T, spans []trace.ReadOnlySpan) {
 		otelgrpc.GRPCStatusCodeKey.Int64(int64(codes.OK)),
 	}, pingPong.Attributes())
 }
+
+func checkClientMetrics(t *testing.T, reader metric.Reader) {
+	rm := metricdata.ResourceMetrics{}
+	err := reader.Collect(context.Background(), &rm)
+	assert.NoError(t, err)
+	require.Len(t, rm.ScopeMetrics, 1)
+	require.Len(t, rm.ScopeMetrics[0].Metrics, 5)
+	expectedScopeMetric := metricdata.ScopeMetrics{
+		Scope: instrumentation.Scope{
+			Name:      "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+			Version:   otelgrpc.Version(),
+			SchemaURL: "https://opentelemetry.io/schemas/1.17.0",
+		},
+		Metrics: []metricdata.Metrics{
+			{
+				Name:        "rpc.client.duration",
+				Description: "Measures the duration of inbound RPC.",
+				Unit:        "ms",
+				Data: metricdata.Histogram[float64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[float64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.client.request.size",
+				Description: "Measures size of RPC request messages (uncompressed).",
+				Unit:        "By",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(0)),
+							Min:          metricdata.NewExtrema(int64(0)),
+							Count:        1,
+							Sum:          0,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+							Max:          metricdata.NewExtrema(int64(314167)),
+							Min:          metricdata.NewExtrema(int64(314167)),
+							Count:        1,
+							Sum:          314167,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(58987)),
+							Min:          metricdata.NewExtrema(int64(13)),
+							Count:        4,
+							Sum:          93082,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(58987)),
+							Min:          metricdata.NewExtrema(int64(13)),
+							Count:        4,
+							Sum:          93082,
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.client.response.size",
+				Description: "Measures size of RPC response messages (uncompressed).",
+				Unit:        "By",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(0)),
+							Min:          metricdata.NewExtrema(int64(0)),
+							Count:        1,
+							Sum:          0,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+							Max:          metricdata.NewExtrema(int64(271840)),
+							Min:          metricdata.NewExtrema(int64(271840)),
+							Count:        1,
+							Sum:          271840,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(45912)),
+							Min:          metricdata.NewExtrema(int64(12)),
+							Count:        4,
+							Sum:          74948,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(21)),
+							Min:          metricdata.NewExtrema(int64(21)),
+							Count:        1,
+							Sum:          21,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(45918)),
+							Min:          metricdata.NewExtrema(int64(16)),
+							Count:        4,
+							Sum:          74969,
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.client.requests_per_rpc",
+				Description: "Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs.",
+				Unit:        "{count}",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.client.responses_per_rpc",
+				Description: "Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs.",
+				Unit:        "{count}",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+					},
+				},
+			},
+		},
+	}
+	metricdatatest.AssertEqual(t, expectedScopeMetric, rm.ScopeMetrics[0], metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreValue())
+}
+
+func checkServerMetrics(t *testing.T, reader metric.Reader) {
+	rm := metricdata.ResourceMetrics{}
+	err := reader.Collect(context.Background(), &rm)
+	assert.NoError(t, err)
+	require.Len(t, rm.ScopeMetrics, 1)
+	require.Len(t, rm.ScopeMetrics[0].Metrics, 5)
+	expectedScopeMetric := metricdata.ScopeMetrics{
+		Scope: instrumentation.Scope{
+			Name:      "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+			Version:   otelgrpc.Version(),
+			SchemaURL: "https://opentelemetry.io/schemas/1.17.0",
+		},
+		Metrics: []metricdata.Metrics{
+			{
+				Name:        "rpc.server.duration",
+				Description: "Measures the duration of inbound RPC.",
+				Unit:        "ms",
+				Data: metricdata.Histogram[float64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[float64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.server.request.size",
+				Description: "Measures size of RPC request messages (uncompressed).",
+				Unit:        "By",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(0)),
+							Min:          metricdata.NewExtrema(int64(0)),
+							Count:        1,
+							Sum:          0,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+							Max:          metricdata.NewExtrema(int64(271840)),
+							Min:          metricdata.NewExtrema(int64(271840)),
+							Count:        1,
+							Sum:          271840,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(45912)),
+							Min:          metricdata.NewExtrema(int64(12)),
+							Count:        4,
+							Sum:          74948,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(21)),
+							Min:          metricdata.NewExtrema(int64(21)),
+							Count:        1,
+							Sum:          21,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(45918)),
+							Min:          metricdata.NewExtrema(int64(16)),
+							Count:        4,
+							Sum:          74969,
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.server.response.size",
+				Description: "Measures size of RPC response messages (uncompressed).",
+				Unit:        "By",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(0)),
+							Min:          metricdata.NewExtrema(int64(0)),
+							Count:        1,
+							Sum:          0,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+							Max:          metricdata.NewExtrema(int64(314167)),
+							Min:          metricdata.NewExtrema(int64(314167)),
+							Count:        1,
+							Sum:          314167,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(58987)),
+							Min:          metricdata.NewExtrema(int64(13)),
+							Count:        4,
+							Sum:          93082,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2},
+							Max:          metricdata.NewExtrema(int64(58987)),
+							Min:          metricdata.NewExtrema(int64(13)),
+							Count:        4,
+							Sum:          93082,
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.server.requests_per_rpc",
+				Description: "Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs.",
+				Unit:        "{count}",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+					},
+				},
+			},
+			{
+				Name:        "rpc.server.responses_per_rpc",
+				Description: "Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs.",
+				Unit:        "{count}",
+				Data: metricdata.Histogram[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					DataPoints: []metricdata.HistogramDataPoint[int64]{
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("EmptyCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("UnaryCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingInputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(1)),
+							Min:          metricdata.NewExtrema(int64(1)),
+							Count:        1,
+							Sum:          1,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("StreamingOutputCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+						{
+							Attributes: attribute.NewSet(
+								semconv.RPCGRPCStatusCodeOk,
+								semconv.RPCMethod("FullDuplexCall"),
+								semconv.RPCService("grpc.testing.TestService"),
+								semconv.RPCSystemGRPC),
+							Bounds:       []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							BucketCounts: []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+							Max:          metricdata.NewExtrema(int64(4)),
+							Min:          metricdata.NewExtrema(int64(4)),
+							Count:        1,
+							Sum:          4,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	metricdatatest.AssertEqual(t, expectedScopeMetric, rm.ScopeMetrics[0], metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreValue())
+}

diff --git a/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_test.go b/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_test.go
index 7f8ba0d41ae..620e5c9f1aa 100644
--- a/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_test.go
+++ b/instrumentation/google.golang.org/grpc/otelgrpc/test/grpc_test.go
@@ -614,9 +614,9 @@ func checkUnaryServerRecords(t *testing.T, reader metric.Reader) {
 			Name:        "rpc.server.duration",
 			Description: "Measures the duration of inbound RPC.",
 			Unit:        "ms",
-			Data: metricdata.Histogram[int64]{
+			Data: metricdata.Histogram[float64]{
 				Temporality: metricdata.CumulativeTemporality,
-				DataPoints: []metricdata.HistogramDataPoint[int64]{
+				DataPoints: []metricdata.HistogramDataPoint[float64]{
 					{
 						Attributes: attribute.NewSet(
 							semconv.RPCMethod("EmptyCall"),

diff --git a/instrumentation/google.golang.org/grpc/otelgrpc/test/interceptor_test.go b/instrumentation/google.golang.org/grpc/otelgrpc/test/interceptor_test.go
index 3c9bf04d474..03e1876ceb2 100644
--- a/instrumentation/google.golang.org/grpc/otelgrpc/test/interceptor_test.go
+++ b/instrumentation/google.golang.org/grpc/otelgrpc/test/interceptor_test.go
@@ -1095,9 +1095,9 @@ func assertServerMetrics(t *testing.T, reader metric.Reader, serviceName, name s
 			Name:        "rpc.server.duration",
 			Description: "Measures the duration of inbound RPC.",
 			Unit:        "ms",
-			Data: metricdata.Histogram[int64]{
+			Data: metricdata.Histogram[float64]{
 				Temporality: metricdata.CumulativeTemporality,
-				DataPoints: []metricdata.HistogramDataPoint[int64]{
+				DataPoints: []metricdata.HistogramDataPoint[float64]{
 					{
 						Attributes: attribute.NewSet(
 							semconv.RPCMethod(name),
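
---
Editor's note (illustration only, not part of the patch): with this change applied,
the stats handlers emit the new rpc.client.* / rpc.server.* histograms in addition
to spans. A minimal wiring sketch follows. It assumes an insecure local connection,
and the helper names (newInstrumentedServer, newInstrumentedClient) are hypothetical;
the grpc and otelgrpc calls themselves are the real public API.

	package main

	import (
		"google.golang.org/grpc"
		"google.golang.org/grpc/credentials/insecure"

		"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	)

	// newInstrumentedServer registers the server-side stats handler, so every
	// handled RPC records rpc.server.duration, rpc.server.request.size,
	// rpc.server.response.size and the *_per_rpc histograms.
	func newInstrumentedServer() *grpc.Server {
		return grpc.NewServer(
			grpc.StatsHandler(otelgrpc.NewServerHandler()),
		)
	}

	// newInstrumentedClient wires the equivalent rpc.client.* instruments.
	func newInstrumentedClient(target string) (*grpc.ClientConn, error) {
		return grpc.Dial(target,
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
		)
	}

Called with no options, the handlers fall back to the globally registered
providers; to direct telemetry elsewhere, pass otelgrpc.WithTracerProvider and
otelgrpc.WithMeterProvider, as the updated test file does with its manual
metric readers.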