Implement otel retry metrics #12064
@@ -18,8 +18,11 @@
 import static com.google.common.base.Preconditions.checkNotNull;
 import static io.grpc.internal.GrpcUtil.IMPLEMENTATION_VERSION;
+import static io.grpc.opentelemetry.internal.OpenTelemetryConstants.HEDGE_BUCKETS;
 import static io.grpc.opentelemetry.internal.OpenTelemetryConstants.LATENCY_BUCKETS;
+import static io.grpc.opentelemetry.internal.OpenTelemetryConstants.RETRY_BUCKETS;
 import static io.grpc.opentelemetry.internal.OpenTelemetryConstants.SIZE_BUCKETS;
+import static io.grpc.opentelemetry.internal.OpenTelemetryConstants.TRANSPARENT_RETRY_BUCKETS;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Stopwatch;
@@ -64,8 +67,8 @@ public Stopwatch get() {
   };

   @VisibleForTesting
-  static boolean ENABLE_OTEL_TRACING = GrpcUtil.getFlag("GRPC_EXPERIMENTAL_ENABLE_OTEL_TRACING",
-      false);
+  static boolean ENABLE_OTEL_TRACING =
+      GrpcUtil.getFlag("GRPC_EXPERIMENTAL_ENABLE_OTEL_TRACING", false);

   private final OpenTelemetry openTelemetrySdk;
   private final MeterProvider meterProvider;

Review comment: Add unit tests.

Reply: The gRFC has had recent updates; waiting for it to get merged before doing further changes...

Reply: Yeah, there had been discussions yesterday about the recent changes to the retry metrics and whether they helped and such. The implementation should be mostly the same, but it probably isn't worth chasing the gRFC for the moment.
@@ -241,6 +244,54 @@ static OpenTelemetryMetricsResource createMetricInstruments(Meter meter,
               .build());
     }

+    if (isMetricEnabled("grpc.client.call.retries", enableMetrics, disableDefault)) {
+      builder.clientCallRetriesCounter(
+          meter.histogramBuilder(
+              "grpc.client.call.retries")
+              .setUnit("{retry}")
+              .setDescription("Number of retries during the client call. "
+                  + "If there were no retries, 0 is not reported.")
+              .ofLongs()
+              .setExplicitBucketBoundariesAdvice(RETRY_BUCKETS)
+              .build());
+    }
+
+    if (isMetricEnabled("grpc.client.call.transparent_retries", enableMetrics,
+        disableDefault)) {
+      builder.clientCallTransparentRetriesCounter(
+          meter.histogramBuilder(
+              "grpc.client.call.transparent_retries")
+              .setUnit("{transparent_retry}")
+              .setDescription("Number of transparent retries during the client call. "
+                  + "If there were no transparent retries, 0 is not reported.")
+              .ofLongs()
+              .setExplicitBucketBoundariesAdvice(TRANSPARENT_RETRY_BUCKETS)
+              .build());
+    }
+
+    if (isMetricEnabled("grpc.client.call.hedges", enableMetrics, disableDefault)) {
+      builder.clientCallHedgesCounter(
+          meter.histogramBuilder(
+              "grpc.client.call.hedges")
+              .setUnit("{hedge}")
+              .setDescription("Number of hedges during the client call. "
+                  + "If there were no hedges, 0 is not reported.")
+              .ofLongs()
+              .setExplicitBucketBoundariesAdvice(HEDGE_BUCKETS)
+              .build());
+    }
+
+    if (isMetricEnabled("grpc.client.call.retry_delay", enableMetrics, disableDefault)) {
+      builder.clientCallRetryDelayCounter(
+          meter.histogramBuilder(
+              "grpc.client.call.retry_delay")
+              .setUnit("s")
+              .setDescription("Total time of delay while there is no active attempt during the "
+                  + "client call")
+              .setExplicitBucketBoundariesAdvice(LATENCY_BUCKETS)
+              .build());
+    }
+
     if (isMetricEnabled("grpc.server.call.started", enableMetrics, disableDefault)) {
       builder.serverCallCountCounter(
           meter.counterBuilder("grpc.server.call.started")
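The four new histograms take explicit bucket-boundary advice from constants in OpenTelemetryConstants that are not part of this diff. As a reading aid only, the sketch below shows the shape such constants need to have; the numeric values are placeholders chosen for illustration, not the actual RETRY_BUCKETS, HEDGE_BUCKETS, TRANSPARENT_RETRY_BUCKETS, or LATENCY_BUCKETS. Note that the count-style histograms call .ofLongs(), so their advice lists are List<Long>, while the latency-style histogram advises List<Double>.

// Illustrative only: hypothetical bucket-boundary constants of the kind the new
// histograms reference. The real values live in OpenTelemetryConstants and are
// not visible in this diff.
import com.google.common.collect.ImmutableList;
import java.util.List;

final class RetryBucketSketch {
  // Count-style histograms (retries / hedges / transparent retries) are long
  // histograms, so the boundary advice is a List<Long>.
  static final List<Long> RETRY_BUCKETS_EXAMPLE =
      ImmutableList.of(1L, 2L, 3L, 4L, 5L, 10L, 20L, 50L, 100L);

  // Latency-style histograms (retry_delay) record doubles in seconds, so the
  // boundary advice is a List<Double>.
  static final List<Double> LATENCY_BUCKETS_EXAMPLE =
      ImmutableList.of(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0);

  private RetryBucketSketch() {}
}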
@@ -259,8 +310,8 @@ static OpenTelemetryMetricsResource createMetricInstruments(Meter meter,
               .build());
     }

-    if (isMetricEnabled("grpc.server.call.sent_total_compressed_message_size", enableMetrics,
-        disableDefault)) {
+    if (isMetricEnabled("grpc.server.call.sent_total_compressed_message_size",
+        enableMetrics, disableDefault)) {
       builder.serverTotalSentCompressedMessageSizeCounter(
           meter.histogramBuilder(
               "grpc.server.call.sent_total_compressed_message_size")

@@ -271,8 +322,8 @@ static OpenTelemetryMetricsResource createMetricInstruments(Meter meter,
               .build());
     }

-    if (isMetricEnabled("grpc.server.call.rcvd_total_compressed_message_size", enableMetrics,
-        disableDefault)) {
+    if (isMetricEnabled("grpc.server.call.rcvd_total_compressed_message_size",
+        enableMetrics, disableDefault)) {
       builder.serverTotalReceivedCompressedMessageSizeCounter(
           meter.histogramBuilder(
               "grpc.server.call.rcvd_total_compressed_message_size")
@@ -71,6 +71,7 @@
  */
 final class OpenTelemetryMetricsModule {
   private static final Logger logger = Logger.getLogger(OpenTelemetryMetricsModule.class.getName());
+  private static final double NANOS_PER_SEC = 1_000_000_000.0;
   public static final ImmutableSet<String> DEFAULT_PER_CALL_METRICS_SET =
       ImmutableSet.of(
           "grpc.client.attempt.started",

Review comment (on NANOS_PER_SEC): SECONDS_PER_NANO already exists. Use multiplication instead of division.
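A minimal sketch of the change the reviewer is asking for. SECONDS_PER_NANO is the existing constant referenced by the reviewer and used later in this diff; its declaration is not shown here, so the value 1e-9 below is an assumption.

// Sketch of the reviewer's suggestion: reuse the existing SECONDS_PER_NANO constant
// and convert by multiplication instead of introducing NANOS_PER_SEC and dividing.
final class RetryDelayConversionSketch {
  // Assumption: mirrors the constant already declared in OpenTelemetryMetricsModule.
  private static final double SECONDS_PER_NANO = 1e-9;

  static double toSeconds(long retryDelayNanos) {
    // before (this PR):  retryDelayNanos / NANOS_PER_SEC
    // after (suggested): retryDelayNanos * SECONDS_PER_NANO
    return retryDelayNanos * SECONDS_PER_NANO;
  }

  private RetryDelayConversionSketch() {}
}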
@@ -292,9 +293,12 @@ static final class CallAttemptsTracerFactory extends ClientStreamTracer.Factory
     private final String fullMethodName;
     private final List<OpenTelemetryPlugin.ClientCallPlugin> callPlugins;
     private Status status;
+    private long retryDelayNanos;
     private long callLatencyNanos;
     private final Object lock = new Object();
     private final AtomicLong attemptsPerCall = new AtomicLong();
+    private final AtomicLong hedgedAttemptsPerCall = new AtomicLong();
+    private final AtomicLong transparentRetriesPerCall = new AtomicLong();
     @GuardedBy("lock")
     private int activeStreams;
     @GuardedBy("lock")
@@ -331,6 +335,7 @@ public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata metada
       }
       if (++activeStreams == 1 && attemptStopwatch.isRunning()) {
         attemptStopwatch.stop();
+        retryDelayNanos = attemptStopwatch.elapsed(TimeUnit.NANOSECONDS);
       }
     }
     // Skip recording for the first time, since it is already recorded in

Review comment (on attemptStopwatch.stop()): This attemptStopwatch looks to be for …

Reply: CallAttemptsTracerFactory.attemptEnded is for recording that a stream ended (StatsTraceContext.streamClosed calls it), so the attemptStopwatch is started at the end of the first attempt and of each retry attempt; it therefore measures the time between stream attempts, not per call. But there is a problem: RetriableStream creates its own anonymous tracerFactory and does not take the one put by … I have a question: a call can only have one stream, and the only way activeStreams can be > 1 is if the tracer factory is shared between calls (and that would mess up the calculation of time between stream attempts anyway). The way OpenTelemetryMetricsModule.MetricsClientInterceptor creates the factory, though, is per call, not shared between calls. So we can never have more than one stream and there should be no need for synchronization either.

Reply: What I said about RetriableStream's anonymous tracerFactory is not a problem; StatsTraceContext.streamClosed iterates over all tracers and calls streamClosed on each of them.

Review comment (on the retryDelayNanos line): I can't tell how …
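To make the thread above easier to follow, here is a simplified sketch (not the actual gRPC code) of how the two callbacks interact: the stopwatch runs only while no attempt is active, so the value captured when the next attempt starts is the accumulated time with no active attempt, which is what grpc.client.call.retry_delay is meant to report. Names mirror the diff; the surrounding class structure and the real call paths are assumed.

// Simplified sketch of the timing scheme discussed above.
import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

final class RetryDelayTimingSketch {
  private final Object lock = new Object();
  private final Stopwatch attemptStopwatch = Stopwatch.createUnstarted();
  private int activeStreams;
  private long retryDelayNanos;

  // Called when a stream/attempt ends (the real module reaches this via
  // StatsTraceContext.streamClosed -> attemptEnded).
  void attemptEnded() {
    synchronized (lock) {
      if (--activeStreams == 0) {
        attemptStopwatch.start(); // start timing the gap until the next attempt
      }
    }
  }

  // Called when a new stream/attempt starts (newClientStreamTracer in the real module).
  void attemptStarted() {
    synchronized (lock) {
      if (++activeStreams == 1 && attemptStopwatch.isRunning()) {
        attemptStopwatch.stop(); // gap over; elapsed() accumulates across all gaps
        retryDelayNanos = attemptStopwatch.elapsed(TimeUnit.NANOSECONDS);
      }
    }
  }
}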
@@ -344,7 +349,11 @@ public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata metada
         module.resource.clientAttemptCountCounter().add(1, attribute);
       }
     }
-    if (!info.isTransparentRetry()) {
+    if (info.isTransparentRetry()) {
+      transparentRetriesPerCall.incrementAndGet();
+    } else if (info.isHedging()) {
+      hedgedAttemptsPerCall.incrementAndGet();
+    } else {
       attemptsPerCall.incrementAndGet();
     }
     return newClientTracer(info);

Review comment (on lines +352 to 357): Should this not assume them to be mutually exclusive?
@@ -407,14 +416,53 @@ void recordFinishedCall() {
         tracer.recordFinishedAttempt();
       }
       callLatencyNanos = callStopWatch.elapsed(TimeUnit.NANOSECONDS);
-      io.opentelemetry.api.common.Attributes attribute =
-          io.opentelemetry.api.common.Attributes.of(METHOD_KEY, fullMethodName,
-              TARGET_KEY, target,
-              STATUS_KEY, status.getCode().toString());
+
+      // Base attributes
+      io.opentelemetry.api.common.Attributes baseAttributes =
+          io.opentelemetry.api.common.Attributes.of(
+              METHOD_KEY, fullMethodName,
+              TARGET_KEY, target
+          );
+
+      // Duration
       if (module.resource.clientCallDurationCounter() != null) {
-        module.resource.clientCallDurationCounter()
-            .record(callLatencyNanos * SECONDS_PER_NANO, attribute);
+        module.resource.clientCallDurationCounter().record(
+            callLatencyNanos * SECONDS_PER_NANO,
+            baseAttributes.toBuilder()
+                .put(STATUS_KEY, status.getCode().toString())
+                .build()
+        );
       }
+
+      // Retry counts
+      if (module.resource.clientCallRetriesCounter() != null) {
+        long retriesPerCall = attemptsPerCall.get() - 1 >= 0 ? attemptsPerCall.get() - 1 : 0;
+        if (retriesPerCall > 0) {
+          module.resource.clientCallRetriesCounter().record(retriesPerCall, baseAttributes);
+        }
+      }
+
+      // Hedge counts
+      if (module.resource.clientCallHedgesCounter() != null) {
+        if (hedgedAttemptsPerCall.get() > 0) {
+          module.resource.clientCallHedgesCounter()
+              .record(hedgedAttemptsPerCall.get(), baseAttributes);
+        }
+      }
+
+      // Transparent Retry counts
+      if (module.resource.clientCallTransparentRetriesCounter() != null
+          && transparentRetriesPerCall.get() > 0) {
+        module.resource.clientCallTransparentRetriesCounter().record(
+            transparentRetriesPerCall.get(), baseAttributes);
+      }
+
+      // Retry delay
+      if (module.resource.clientCallRetryDelayCounter() != null) {
+        module.resource.clientCallRetryDelayCounter().record(
+            retryDelayNanos / NANOS_PER_SEC,
+            baseAttributes
+        );
+      }
     }
   }

Review comment (on the hedge-count block): Read atomics (…
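The truncated "Read atomics (…" comment above appears to concern calling hedgedAttemptsPerCall.get() more than once. Below is one possible tightening consistent with that remark: each AtomicLong is read exactly once into a local before recording. The Math.max form is my own simplification of the ternary and is not part of the diff; the fragment assumes the same fields and resource accessors as the surrounding recordFinishedCall code.

// Sketch only: read each atomic exactly once, then record. This is not the
// committed implementation.
long attempts = attemptsPerCall.get();
long hedges = hedgedAttemptsPerCall.get();
long transparentRetries = transparentRetriesPerCall.get();

long retriesPerCall = Math.max(attempts - 1, 0);
if (module.resource.clientCallRetriesCounter() != null && retriesPerCall > 0) {
  module.resource.clientCallRetriesCounter().record(retriesPerCall, baseAttributes);
}
if (module.resource.clientCallHedgesCounter() != null && hedges > 0) {
  module.resource.clientCallHedgesCounter().record(hedges, baseAttributes);
}
if (module.resource.clientCallTransparentRetriesCounter() != null && transparentRetries > 0) {
  module.resource.clientCallTransparentRetriesCounter().record(transparentRetries, baseAttributes);
}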
Review comment: isHedging is just whether there is a hedging policy. It doesn't mean this particular stream is a hedge.

Reply: So, are you suggesting to change the var name to better reflect that?
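A small sketch of the rename being discussed. It only changes the local naming to make clear that info.isHedging() reports "a hedging policy is configured for the call", not "this stream is a hedge attempt"; the underlying counting question is left exactly as raised above, and the behavior is unchanged from the diff.

// Sketch: rename for clarity only; behavior matches the diff above.
boolean hasHedgingPolicy = info.isHedging();
if (info.isTransparentRetry()) {
  transparentRetriesPerCall.incrementAndGet();
} else if (hasHedgingPolicy) {
  // Still increments for every non-transparent attempt of a call with a hedging
  // policy, including the first attempt; that is the semantic concern raised above.
  hedgedAttemptsPerCall.incrementAndGet();
} else {
  attemptsPerCall.incrementAndGet();
}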