From 3ac198d048f6beda29271e055a9b96a030aceb9a Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Mon, 22 Sep 2025 17:05:24 +0000 Subject: [PATCH 01/34] Update google genai instrumentation to latest semantic convention. Co-authored-by: Aaron Abbott --- .../instrumentation/google_genai/flags.py | 23 +- .../google_genai/generate_content.py | 287 ++++++++++++++---- .../instrumentation/google_genai/message.py | 141 +++++++++ .../google_genai/message_models.py | 58 ++++ .../google_genai/otel_wrapper.py | 36 ++- .../google_genai/tool_call_wrapper.py | 58 ++-- 6 files changed, 500 insertions(+), 103 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py index 541d9ab48f..6fd404eadf 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py @@ -12,12 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os +from os import environ +from typing import Union -_CONTENT_RECORDING_ENV_VAR = ( - "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT" +from opentelemetry.instrumentation._semconv import _StabilityMode +from opentelemetry.util.genai.environment_variables import ( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, ) +from opentelemetry.util.genai.types import ContentCapturingMode +from opentelemetry.util.genai.utils import get_content_capturing_mode -def is_content_recording_enabled(): - return os.getenv(_CONTENT_RECORDING_ENV_VAR, "false").lower() == "true" +def is_content_recording_enabled( + mode: _StabilityMode, +) -> Union[bool, ContentCapturingMode]: + if mode == _StabilityMode.DEFAULT: + capture_content = environ.get( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false" + ) + return capture_content.lower() == "true" + if mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL: + return get_content_capturing_mode() + raise RuntimeError(f"{mode} mode not supported") diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 7e85336e56..cf8bc7da67 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -13,6 +13,7 @@ # limitations under the License. 
import copy +import dataclasses import functools import json import logging @@ -21,6 +22,7 @@ from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union from google.genai.models import AsyncModels, Models +from google.genai.models import t as transformers from google.genai.types import ( BlockedReason, Candidate, @@ -33,18 +35,33 @@ GenerateContentConfigOrDict, GenerateContentResponse, ) - from opentelemetry import trace +from opentelemetry._events import Event +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, +) from opentelemetry.semconv._incubating.attributes import ( code_attributes, gen_ai_attributes, ) from opentelemetry.semconv.attributes import error_attributes +from opentelemetry.trace.span import Span +from opentelemetry.util.genai.types import ContentCapturingMode +from opentelemetry.util.genai.upload_hook import load_upload_hook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG from .dict_util import flatten_dict from .flags import is_content_recording_enabled +from .message import ( + InputMessage, + OutputMessage, + to_input_messages, + to_output_messages, + to_system_instruction, +) from .otel_wrapper import OTelWrapper from .tool_call_wrapper import wrapped as wrapped_tool @@ -144,7 +161,7 @@ def _to_dict(value: object): def _add_request_options_to_span( - span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList + span: Span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList ): if config is None: return @@ -188,9 +205,7 @@ def _add_request_options_to_span( }, ) for key, value in attributes.items(): - if key.startswith( - GCP_GENAI_OPERATION_CONFIG - ) and not allow_list.allowed(key): + if key.startswith(GCP_GENAI_OPERATION_CONFIG) and not allow_list.allowed(key): # The allowlist is used to control inclusion of the dynamic keys. 
continue span.set_attribute(key, value) @@ -226,12 +241,42 @@ def _wrapped_config_with_tools( if not config.tools: return config result = copy.copy(config) - result.tools = [ - wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools - ] + result.tools = [wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools] return result +def _config_to_system_instruction( + config: Union[GenerateContentConfigOrDict, None], +) -> Union[ContentUnion, None]: + if not config: + return None + + if isinstance(config, dict): + return GenerateContentConfig.model_validate(config).system_instruction + return config.system_instruction + + +def _create_completion_details_attributes( + input_messages: list[InputMessage], + output_messages: list[OutputMessage], + system_instruction: Union[InputMessage, None], +): + attributes = { + "gen_ai.input.messages": json.dumps( + [dataclasses.asdict(input_message) for input_message in input_messages] + ), + "gen_ai.output.messages": json.dumps( + [dataclasses.asdict(output_message) for output_message in output_messages] + ), + } + if system_instruction: + attributes["gen_ai.system.instructions"] = json.dumps( + dataclasses.asdict(system_instruction) + ) + + return attributes + + class _GenerateContentInstrumentationHelper: def __init__( self, @@ -248,7 +293,12 @@ def __init__( self._error_type = None self._input_tokens = 0 self._output_tokens = 0 - self._content_recording_enabled = is_content_recording_enabled() + self.sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.GEN_AI + ) + self._content_recording_enabled = is_content_recording_enabled( + self.sem_conv_opt_in_mode + ) self._response_index = 0 self._candidate_index = 0 self._generate_content_config_key_allowlist = ( @@ -268,7 +318,7 @@ def wrapped_config( def start_span_as_current_span( self, model_name, function_name, end_on_exit=True - ): + ) -> Span: return 
self._otel_wrapper.start_as_current_span( f"{_GENERATE_CONTENT_OP_NAME} {model_name}", start_time=self._start_time, @@ -281,29 +331,37 @@ def start_span_as_current_span( end_on_exit=end_on_exit, ) - def process_request( - self, - contents: Union[ContentListUnion, ContentListUnionDict], - config: Optional[GenerateContentConfigOrDict], + def add_request_options_to_span( + self, config: Optional[GenerateContentConfigOrDict] ): span = trace.get_current_span() _add_request_options_to_span( span, config, self._generate_content_config_key_allowlist ) + + def process_request( + self, + contents: Union[ContentListUnion, ContentListUnionDict], + config: Optional[GenerateContentConfigOrDict], + ): self._maybe_log_system_instruction(config=config) self._maybe_log_user_prompt(contents) def process_response(self, response: GenerateContentResponse): - # TODO: Determine if there are other response properties that - # need to be reflected back into the span attributes. - # - # See also: TODOS.md. - self._update_finish_reasons(response) - self._maybe_update_token_counts(response) - self._maybe_update_error_type(response) + self._update_response(response) self._maybe_log_response(response) self._response_index += 1 + def process_completion( + self, + request: Union[ContentListUnion, ContentListUnionDict], + response: GenerateContentResponse, + config: Optional[GenerateContentConfigOrDict] = None, + ): + self._update_response(response) + self._maybe_log_completion_details(request, response, config) + self._response_index += 1 + def process_error(self, e: Exception): self._error_type = str(e.__class__.__name__) @@ -322,7 +380,16 @@ def finalize_processing(self): self._record_token_usage_metric() self._record_duration_metric() - def _update_finish_reasons(self, response): + def _update_response(self, response: GenerateContentResponse): + # TODO: Determine if there are other response properties that + # need to be reflected back into the span attributes. + # + # See also: TODOS.md. 
+ self._update_finish_reasons(response) + self._maybe_update_token_counts(response) + self._maybe_update_error_type(response) + + def _update_finish_reasons(self, response: GenerateContentResponse): if not response.candidates: return for candidate in response.candidates: @@ -373,6 +440,56 @@ def _maybe_update_error_type(self, response: GenerateContentResponse): block_reason = response.prompt_feedback.block_reason.name.upper() self._error_type = f"BLOCKED_{block_reason}" + def _maybe_log_completion_details( + self, + request: Union[ContentListUnion, ContentListUnionDict], + response: GenerateContentResponse, + config: Optional[GenerateContentConfigOrDict] = None, + ): + attributes = { + gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system, + } + system_instruction = None + if system_content := _config_to_system_instruction(config): + system_instruction = to_system_instruction( + content=transformers.t_contents(system_content)[0] + ) + input_messages = to_input_messages(contents=transformers.t_contents(request)) + output_messages = to_output_messages(candidates=response.candidates or []) + + completion_details_attributes = _create_completion_details_attributes( + input_messages, output_messages, system_instruction + ) + + span = None + if self._content_recording_enabled in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + span = trace.get_current_span() + span.set_attributes(completion_details_attributes) + if self._content_recording_enabled in [ + ContentCapturingMode.EVENT_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + attributes.update(completion_details_attributes) + event = Event(name="gen_ai.completion.details", attributes=attributes) + hook = load_upload_hook() + hook.upload( + inputs=input_messages, + outputs=output_messages, + system_instruction=( + system_instruction.parts if system_instruction else [] + ), + span=span, + log_record=event, + ) + # TODO Cannot access attribute shutdown for class UploadHook + # 
hook.shutdown() + self._otel_wrapper.log_completion_details( + event=event, + ) + def _maybe_log_system_instruction( self, config: Optional[GenerateContentConfigOrDict] = None ): @@ -410,9 +527,7 @@ def _maybe_log_user_prompt( total = len(contents) index = 0 for entry in contents: - self._maybe_log_single_user_prompt( - entry, index=index, total=total - ) + self._maybe_log_single_user_prompt(entry, index=index, total=total) index += 1 else: self._maybe_log_single_user_prompt(contents) @@ -445,32 +560,6 @@ def _maybe_log_single_user_prompt( body=body, ) - def _maybe_log_response_stats(self, response: GenerateContentResponse): - # TODO: Determine if there is a way that we can log a summary - # of the overall response in a manner that is aligned with - # Semantic Conventions. For example, it would be natural - # to report an event that looks something like: - # - # gen_ai.response.stats { - # response_index: 0, - # candidate_count: 3, - # parts_per_candidate: [ - # 3, - # 1, - # 5 - # ] - # } - # - pass - - def _maybe_log_response_safety_ratings( - self, response: GenerateContentResponse - ): - # TODO: Determine if there is a way that we can log - # the "prompt_feedback". This would be especially useful - # in the case where the response is blocked. - pass - def _maybe_log_response(self, response: GenerateContentResponse): self._maybe_log_response_stats(response) self._maybe_log_response_safety_ratings(response) @@ -526,6 +615,30 @@ def _maybe_log_response_candidate( body=body, ) + def _maybe_log_response_stats(self, response: GenerateContentResponse): + # TODO: Determine if there is a way that we can log a summary + # of the overall response in a manner that is aligned with + # Semantic Conventions. 
For example, it would be natural + # to report an event that looks something like: + # + # gen_ai.response.stats { + # response_index: 0, + # candidate_count: 3, + # parts_per_candidate: [ + # 3, + # 1, + # 5 + # ] + # } + # + pass + + def _maybe_log_response_safety_ratings(self, response: GenerateContentResponse): + # TODO: Determine if there is a way that we can log + # the "prompt_feedback". This would be especially useful + # in the case where the response is blocked. + pass + def _record_token_usage_metric(self): self._otel_wrapper.token_usage_metric.record( self._input_tokens, @@ -587,7 +700,9 @@ def instrumented_generate_content( with helper.start_span_as_current_span( model, "google.genai.Models.generate_content" ): - helper.process_request(contents, config) + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_request(contents, config) try: response = wrapped_func( self, @@ -596,7 +711,17 @@ def instrumented_generate_content( config=helper.wrapped_config(config), **kwargs, ) - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." 
+ ) return response except Exception as error: helper.process_error(error) @@ -632,7 +757,9 @@ def instrumented_generate_content_stream( with helper.start_span_as_current_span( model, "google.genai.Models.generate_content_stream" ): - helper.process_request(contents, config) + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_request(contents, config) try: for response in wrapped_func( self, @@ -641,7 +768,17 @@ def instrumented_generate_content_stream( config=helper.wrapped_config(config), **kwargs, ): - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." + ) yield response except Exception as error: helper.process_error(error) @@ -677,7 +814,9 @@ async def instrumented_generate_content( with helper.start_span_as_current_span( model, "google.genai.AsyncModels.generate_content" ): - helper.process_request(contents, config) + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_request(contents, config) try: response = await wrapped_func( self, @@ -686,7 +825,17 @@ async def instrumented_generate_content( config=helper.wrapped_config(config), **kwargs, ) - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." 
+ ) return response except Exception as error: helper.process_error(error) @@ -725,6 +874,8 @@ async def instrumented_generate_content_stream( "google.genai.AsyncModels.generate_content_stream", end_on_exit=False, ) as span: + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: helper.process_request(contents, config) try: response_async_generator = await wrapped_func( @@ -744,7 +895,17 @@ async def _response_async_generator_wrapper(): with trace.use_span(span, end_on_exit=True): try: async for response in response_async_generator: - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." + ) yield response except Exception as error: helper.process_error(error) @@ -782,9 +943,11 @@ def instrument_generate_content( otel_wrapper, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) - AsyncModels.generate_content_stream = _create_instrumented_async_generate_content_stream( - snapshot, - otel_wrapper, - generate_content_config_key_allowlist=generate_content_config_key_allowlist, + AsyncModels.generate_content_stream = ( + _create_instrumented_async_generate_content_stream( + snapshot, + otel_wrapper, + generate_content_config_key_allowlist=generate_content_config_key_allowlist, + ) ) return snapshot diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py new file mode 100644 index 0000000000..eaf470892e --- /dev/null +++ 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -0,0 +1,141 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import logging + +from google.genai import types as genai_types +from opentelemetry.util.genai.types import ( + InputMessage, + OutputMessage, + MessagePart, + FinishReason, + Text, + ToolCall, + ToolCallResponse, +) + +from .message_models import ( + # BlobPart, + # FileDataPart, + Role, +) + +_logger = logging.getLogger(__name__) + +def to_input_messages( + *, + contents: list[genai_types.Content], +) -> list[InputMessage]: + return [_to_input_message(content) for content in contents] + +def to_output_messages( + *, + candidates: list[genai_types.Candidate], +) -> list[OutputMessage]: + def content_to_output_message( + candidate: genai_types.Candidate, + ) -> OutputMessage | None: + if not candidate.content: + return None + + message = _to_input_message(candidate.content) + return OutputMessage( + finish_reason=_to_finish_reason(candidate.finish_reason), + role=message.role, + parts=message.parts, + ) + + messages = ( + content_to_output_message(candidate) for candidate in candidates + ) + return [message for message in messages if message is not None] + +def to_system_instruction( + *, + content: genai_types.Content, +) -> InputMessage: + return _to_input_message(content) + +def _to_input_message(
content: genai_types.Content, +) -> InputMessage: + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) + return InputMessage( + role=_to_role(content.role), + # filter Nones + parts=[part for part in parts if part is not None], + ) + +def _to_part(part: genai_types.Part, idx: int) -> MessagePart | None: + def tool_call_id(name: str | None) -> str: + if name: + return f"{name}_{idx}" + return f"{idx}" + + if (text := part.text) is not None: + return Text(content=text) + + # if data := part.inline_data: # TODO ??? + # return BlobPart(mime_type=data.mime_type or "", data=data.data or b"") + + # if data := part.file_data: # TODO ??? + # return FileDataPart( + # mime_type=data.mime_type or "", file_uri=data.file_uri or "" + # ) + + if call := part.function_call: + return ToolCall( + id=call.id or tool_call_id(call.name), # TODO ??? + name=call.name or "", + arguments=call.args, + ) + + if response := part.function_response: + return ToolCallResponse( + id=response.id or tool_call_id(response.name), # TODO ??? 
+ response=response.response, + ) + + _logger.info("Unknown part dropped from telemetry %s", part) + return None + +def _to_role(role: str | None) -> Role | str: + if role == "user": + return Role.USER + if role == "model": + return Role.ASSISTANT + return "" + + +def _to_finish_reason( + finish_reason: genai_types.FinishReason | None, +) -> FinishReason | str: + if finish_reason is None: + return "" + if ( + finish_reason is genai_types.FinishReason.FINISH_REASON_UNSPECIFIED + or finish_reason is genai_types.FinishReason.OTHER + ): + return "error" + if finish_reason is genai_types.FinishReason.STOP: + return "stop" + if finish_reason is genai_types.FinishReason.MAX_TOKENS: + return "length" + + # If there is no 1:1 mapping to an OTel preferred enum value, use the exact vertex reason + return finish_reason.name diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py new file mode 100644 index 0000000000..bf5b804ba7 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py @@ -0,0 +1,58 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Copied and adapted from +# https://gist.github.com/lmolkova/09ba0de7f68280f1eac27a6acfd9b1a6?permalink_comment_id=5578799#gistcomment-5578799 + +from enum import Enum +from typing import Annotated, Literal + +from pydantic import Base64Encoder, BaseModel, EncodedBytes + + +class Base64OneWayEncoder(Base64Encoder): + @classmethod + def decode(cls, data: bytes) -> bytes: + """NoOp""" + return data + + +Base64EncodedBytes = Annotated[ + bytes, EncodedBytes(encoder=Base64OneWayEncoder) +] + + +class Role(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + TOOL = "tool" + + +class BlobPart(BaseModel): + type: Literal["blob"] = "blob" + mime_type: str + data: Base64EncodedBytes + + class Config: + extra = "allow" + + +class FileDataPart(BaseModel): + type: Literal["file_data"] = "file_data" + mime_type: str + file_uri: str + + class Config: + extra = "allow" diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py index b7dbb5de41..3d6a5a41a2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations import logging +from typing import Any import google.genai - -from opentelemetry._events import Event +from opentelemetry._events import Event, EventLogger, EventLoggerProvider +from opentelemetry.metrics import Meter, MeterProvider from opentelemetry.semconv._incubating.metrics import gen_ai_metrics from opentelemetry.semconv.schemas import Schemas +from opentelemetry.trace import Tracer, TracerProvider from .version import __version__ as _LIBRARY_VERSION @@ -36,19 +39,23 @@ class OTelWrapper: - def __init__(self, tracer, event_logger, meter): + def __init__(self, tracer: Tracer, event_logger: EventLogger, meter: Meter): self._tracer = tracer self._event_logger = event_logger self._meter = meter self._operation_duration_metric = ( gen_ai_metrics.create_gen_ai_client_operation_duration(meter) ) - self._token_usage_metric = ( - gen_ai_metrics.create_gen_ai_client_token_usage(meter) + self._token_usage_metric = gen_ai_metrics.create_gen_ai_client_token_usage( + meter ) @staticmethod - def from_providers(tracer_provider, event_logger_provider, meter_provider): + def from_providers( + tracer_provider: TracerProvider, + event_logger_provider: EventLoggerProvider, + meter_provider: MeterProvider, + ): return OTelWrapper( tracer_provider.get_tracer( _SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES @@ -72,21 +79,30 @@ def operation_duration_metric(self): def token_usage_metric(self): return self._token_usage_metric - def log_system_prompt(self, attributes, body): + def log_system_prompt(self, attributes: dict[str, str], body: dict[str, Any]): _logger.debug("Recording system prompt.") event_name = "gen_ai.system.message" self._log_event(event_name, attributes, body) - def log_user_prompt(self, attributes, body): + def log_user_prompt(self, attributes: dict[str, str], body: dict[str, Any]): _logger.debug("Recording user prompt.") event_name = "gen_ai.user.message" self._log_event(event_name, attributes, body) - def 
log_response_content(self, attributes, body): + def log_response_content(self, attributes: dict[str, str], body: dict[str, Any]): _logger.debug("Recording response.") event_name = "gen_ai.choice" self._log_event(event_name, attributes, body) - def _log_event(self, event_name, attributes, body): + def _log_event( + self, event_name: str, attributes: dict[str, str], body: dict[str, Any] + ): event = Event(event_name, body=body, attributes=attributes) self._event_logger.emit(event) + + def log_completion_details( + self, + event: Event, + ) -> None: + _logger.debug("Recording completion details event.") + self._event_logger.emit(event) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py index 7b4cc1924a..80d83ea3ae 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py @@ -27,6 +27,13 @@ from opentelemetry.semconv._incubating.attributes import ( code_attributes, ) +from opentelemetry.instrumentation._semconv import ( + _StabilityMode, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, +) + +from opentelemetry.util.genai.types import ContentCapturingMode from .flags import is_content_recording_enabled from .otel_wrapper import OTelWrapper @@ -45,9 +52,7 @@ def _to_otel_value(python_value): if isinstance(python_value, list): return [_to_otel_value(x) for x in python_value] if isinstance(python_value, dict): - return { - key: _to_otel_value(val) for (key, val) in python_value.items() - } + return {key: _to_otel_value(val) for (key, val) in python_value.items()} if hasattr(python_value, 
"model_dump"): return python_value.model_dump() if hasattr(python_value, "__dict__"): @@ -76,6 +81,21 @@ def _to_otel_attribute(python_value): return json.dumps(otel_value) +def _is_capture_content_enabled() -> bool: + mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.GEN_AI + ) + if mode == _StabilityMode.DEFAULT: + return bool(is_content_recording_enabled(mode)) + if mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL: + capturing_mode = is_content_recording_enabled(mode) + return capturing_mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ] + raise RuntimeError(f"{mode} mode not supported") + + def _create_function_span_name(wrapped_function): """Constructs the span name for a given local function tool call.""" function_name = wrapped_function.__name__ @@ -100,9 +120,7 @@ def _create_function_span_attributes( return result -def _record_function_call_argument( - span, param_name, param_value, include_values -): +def _record_function_call_argument(span, param_name, param_value, include_values): attribute_prefix = f"code.function.parameters.{param_name}" type_attribute = f"{attribute_prefix}.type" span.set_attribute(type_attribute, type(param_value).__name__) @@ -115,7 +133,7 @@ def _record_function_call_arguments( otel_wrapper, wrapped_function, function_args, function_kwargs ): """Records the details about a function invocation as span attributes.""" - include_values = is_content_recording_enabled() + include_values = _is_capture_content_enabled() span = trace.get_current_span() signature = inspect.signature(wrapped_function) params = list(signature.parameters.values()) @@ -130,13 +148,11 @@ def _record_function_call_arguments( def _record_function_call_result(otel_wrapper, wrapped_function, result): """Records the details about a function result as span attributes.""" - include_values = is_content_recording_enabled() + include_values = 
_is_capture_content_enabled() span = trace.get_current_span() span.set_attribute("code.function.return.type", type(result).__name__) if include_values: - span.set_attribute( - "code.function.return.value", _to_otel_attribute(result) - ) + span.set_attribute("code.function.return.value", _to_otel_attribute(result)) def _wrap_sync_tool_function( @@ -151,12 +167,8 @@ def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, extra_span_attributes ) - with otel_wrapper.start_as_current_span( - span_name, attributes=attributes - ): - _record_function_call_arguments( - otel_wrapper, tool_function, args, kwargs - ) + with otel_wrapper.start_as_current_span(span_name, attributes=attributes): + _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) result = tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -176,12 +188,8 @@ async def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, extra_span_attributes ) - with otel_wrapper.start_as_current_span( - span_name, attributes=attributes - ): - _record_function_call_arguments( - otel_wrapper, tool_function, args, kwargs - ) + with otel_wrapper.start_as_current_span(span_name, attributes=attributes): + _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) result = await tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -207,9 +215,7 @@ def wrapped( if tool_or_tools is None: return None if isinstance(tool_or_tools, list): - return [ - wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools - ] + return [wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools] if isinstance(tool_or_tools, dict): return { key: wrapped(value, otel_wrapper, **kwargs) From 9ea22781a190e651f9d66700430e01b8705f45d7 Mon Sep 17 00:00:00 2001 From: Artur 
Gajowniczek Date: Thu, 25 Sep 2025 11:55:20 +0000 Subject: [PATCH 02/34] fix: log roles as str, event attrs as objects. --- .../google_genai/generate_content.py | 53 +++++++++---------- .../instrumentation/google_genai/message.py | 17 +++--- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index cf8bc7da67..fa47ac2a45 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -48,7 +48,7 @@ ) from opentelemetry.semconv.attributes import error_attributes from opentelemetry.trace.span import Span -from opentelemetry.util.genai.types import ContentCapturingMode +from opentelemetry.util.genai.types import ContentCapturingMode, MessagePart from opentelemetry.util.genai.upload_hook import load_upload_hook from .allowlist_util import AllowList @@ -60,7 +60,7 @@ OutputMessage, to_input_messages, to_output_messages, - to_system_instruction, + to_system_instructions, ) from .otel_wrapper import OTelWrapper from .tool_call_wrapper import wrapped as wrapped_tool @@ -259,20 +259,18 @@ def _config_to_system_instruction( def _create_completion_details_attributes( input_messages: list[InputMessage], output_messages: list[OutputMessage], - system_instruction: Union[InputMessage, None], -): - attributes = { - "gen_ai.input.messages": json.dumps( - [dataclasses.asdict(input_message) for input_message in input_messages] - ), - "gen_ai.output.messages": json.dumps( - [dataclasses.asdict(output_message) for output_message in output_messages] - ), + system_instructions: list[MessagePart], 
+ as_str: bool = False, +) -> dict[str, Any]: + attributes: dict[str, Any] = { + "gen_ai.input.messages": [dataclasses.asdict(input_message) for input_message in input_messages], + "gen_ai.output.messages": [dataclasses.asdict(output_message) for output_message in output_messages], } - if system_instruction: - attributes["gen_ai.system.instructions"] = json.dumps( - dataclasses.asdict(system_instruction) - ) + if system_instructions: + attributes["gen_ai.system.instructions"] = [dataclasses.asdict(sys_instr) for sys_instr in system_instructions] + + if as_str: + return {k: json.dumps(v) for k, v in attributes.items()} return attributes @@ -449,46 +447,45 @@ def _maybe_log_completion_details( attributes = { gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system, } - system_instruction = None + system_instructions = [] if system_content := _config_to_system_instruction(config): - system_instruction = to_system_instruction( + system_instructions = to_system_instructions( content=transformers.t_contents(system_content)[0] ) input_messages = to_input_messages(contents=transformers.t_contents(request)) output_messages = to_output_messages(candidates=response.candidates or []) - completion_details_attributes = _create_completion_details_attributes( - input_messages, output_messages, system_instruction - ) + span = None if self._content_recording_enabled in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: + completion_details_attributes = _create_completion_details_attributes( + input_messages, output_messages, system_instructions, as_str=True, + ) span = trace.get_current_span() span.set_attributes(completion_details_attributes) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: + completion_details_attributes = _create_completion_details_attributes( + input_messages, output_messages, system_instructions, + ) attributes.update(completion_details_attributes) event = 
Event(name="gen_ai.completion.details", attributes=attributes) hook = load_upload_hook() hook.upload( inputs=input_messages, outputs=output_messages, - system_instruction=( - system_instruction.parts if system_instruction else [] - ), + system_instruction=system_instructions, span=span, log_record=event, ) - # TODO Cannot access attribute shutdown for class UploadHook - # hook.shutdown() - self._otel_wrapper.log_completion_details( - event=event, - ) + hook.shutdown() + self._otel_wrapper.log_completion_details(event=event) def _maybe_log_system_instruction( self, config: Optional[GenerateContentConfigOrDict] = None diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index eaf470892e..2f56e0d484 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -39,7 +39,7 @@ def to_input_messages( *, contents: list[genai_types.Content], ) -> list[InputMessage]: - return [_to_input_message(content) for content in contents]) + return [_to_input_message(content) for content in contents] def to_output_messages( *, @@ -63,11 +63,14 @@ def content_to_output_message( ) return [message for message in messages if message is not None] -def to_system_instruction( +def to_system_instructions( *, content: genai_types.Content, -) -> InputMessage: - return _to_input_message(content) +) -> list[MessagePart]: + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) + return [part for part in parts if part is not None] def _to_input_message( content: genai_types.Content, @@ -114,11 +117,11 @@ def tool_call_id(name: str | None) -> str: 
_logger.info("Unknown part dropped from telemetry %s", part) return None -def _to_role(role: str | None) -> Role | str: +def _to_role(role: str | None) -> str: if role == "user": - return Role.USER + return Role.USER.value if role == "model": - return Role.ASSISTANT + return Role.ASSISTANT.value return "" From eeed7d5b7695313be88f0ed174707e5471bd9a6f Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 11:55:39 +0000 Subject: [PATCH 03/34] fix: proper event name --- .../instrumentation/google_genai/generate_content.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index fa47ac2a45..9b653d4a2c 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -475,7 +475,7 @@ def _maybe_log_completion_details( input_messages, output_messages, system_instructions, ) attributes.update(completion_details_attributes) - event = Event(name="gen_ai.completion.details", attributes=attributes) + event = Event(name="gen_ai.client.inference.operation.details", attributes=attributes) hook = load_upload_hook() hook.upload( inputs=input_messages, From e1738c2a6d89d2ff00e6dce1e9477ce6c5a37886 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 12:52:30 +0000 Subject: [PATCH 04/34] refactor: remove unused message models. 
--- .../instrumentation/google_genai/message.py | 47 +++++++-------- .../google_genai/message_models.py | 58 ------------------- 2 files changed, 21 insertions(+), 84 deletions(-) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index 2f56e0d484..760589d1f8 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -15,32 +15,37 @@ from __future__ import annotations import logging +from enum import Enum from google.genai import types as genai_types from opentelemetry.util.genai.types import ( + FinishReason, InputMessage, - OutputMessage, MessagePart, - FinishReason, + OutputMessage, Text, ToolCall, ToolCallResponse, ) -from .message_models import ( - # BlobPart, - # FileDataPart, - Role, -) + +class Role(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + TOOL = "tool" + _logger = logging.getLogger(__name__) + def to_input_messages( *, contents: list[genai_types.Content], ) -> list[InputMessage]: return [_to_input_message(content) for content in contents] + def to_output_messages( *, candidates: list[genai_types.Candidate], @@ -58,32 +63,29 @@ def content_to_output_message( parts=message.parts, ) - messages = ( - content_to_output_message(candidate) for candidate in candidates - ) + messages = (content_to_output_message(candidate) for candidate in candidates) return [message for message in messages if message is not None] + def to_system_instructions( *, content: 
genai_types.Content, ) -> list[MessagePart]: - parts = ( - _to_part(part, idx) for idx, part in enumerate(content.parts or []) - ) + parts = (_to_part(part, idx) for idx, part in enumerate(content.parts or [])) return [part for part in parts if part is not None] + def _to_input_message( content: genai_types.Content, ) -> InputMessage: - parts = ( - _to_part(part, idx) for idx, part in enumerate(content.parts or []) - ) + parts = (_to_part(part, idx) for idx, part in enumerate(content.parts or [])) return InputMessage( role=_to_role(content.role), # filter Nones parts=[part for part in parts if part is not None], ) + def _to_part(part: genai_types.Part, idx: int) -> MessagePart | None: def tool_call_id(name: str | None) -> str: if name: @@ -93,30 +95,23 @@ def tool_call_id(name: str | None) -> str: if (text := part.text) is not None: return Text(content=text) - # if data := part.inline_data: # TODO ??? - # return BlobPart(mime_type=data.mime_type or "", data=data.data or b"") - - # if data := part.file_data: # TODO ??? - # return FileDataPart( - # mime_type=data.mime_type or "", file_uri=data.file_uri or "" - # ) - if call := part.function_call: return ToolCall( - id=call.id or tool_call_id(call.name), # TODO ??? + id=call.id or tool_call_id(call.name), name=call.name or "", arguments=call.args, ) if response := part.function_response: return ToolCallResponse( - id=response.id or tool_call_id(response.name), # TODO ??? 
+ id=response.id or tool_call_id(response.name), response=response.response, ) _logger.info("Unknown part dropped from telemetry %s", part) return None + def _to_role(role: str | None) -> str: if role == "user": return Role.USER.value diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py deleted file mode 100644 index bf5b804ba7..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Copied and adapted from -# https://gist.github.com/lmolkova/09ba0de7f68280f1eac27a6acfd9b1a6?permalink_comment_id=5578799#gistcomment-5578799 - -from enum import Enum -from typing import Annotated, Literal - -from pydantic import Base64Encoder, BaseModel, EncodedBytes - - -class Base64OneWayEncoder(Base64Encoder): - @classmethod - def decode(cls, data: bytes) -> bytes: - """NoOp""" - return data - - -Base64EncodedBytes = Annotated[ - bytes, EncodedBytes(encoder=Base64OneWayEncoder) -] - - -class Role(str, Enum): - SYSTEM = "system" - USER = "user" - ASSISTANT = "assistant" - TOOL = "tool" - - -class BlobPart(BaseModel): - type: Literal["blob"] = "blob" - mime_type: str - data: Base64EncodedBytes - - class Config: - extra = "allow" - - -class FileDataPart(BaseModel): - type: Literal["file_data"] = "file_data" - mime_type: str - file_uri: str - - class Config: - extra = "allow" From f1feaf7373b9d988e2e4a8c43094f2fdcd3fa454 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 12:54:31 +0000 Subject: [PATCH 05/34] refactor: use OTel imported semconv attributes. 
--- .../google_genai/generate_content.py | 36 +++++++++++++------ 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 9b653d4a2c..a786abf2b9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -48,7 +48,12 @@ ) from opentelemetry.semconv.attributes import error_attributes from opentelemetry.trace.span import Span -from opentelemetry.util.genai.types import ContentCapturingMode, MessagePart +from opentelemetry.util.genai.types import ( + ContentCapturingMode, + MessagePart, + InputMessage, + OutputMessage, +) from opentelemetry.util.genai.upload_hook import load_upload_hook from .allowlist_util import AllowList @@ -56,8 +61,6 @@ from .dict_util import flatten_dict from .flags import is_content_recording_enabled from .message import ( - InputMessage, - OutputMessage, to_input_messages, to_output_messages, to_system_instructions, @@ -263,11 +266,17 @@ def _create_completion_details_attributes( as_str: bool = False, ) -> dict[str, Any]: attributes: dict[str, Any] = { - "gen_ai.input.messages": [dataclasses.asdict(input_message) for input_message in input_messages], - "gen_ai.output.messages": [dataclasses.asdict(output_message) for output_message in output_messages], + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: [ + dataclasses.asdict(input_message) for input_message in input_messages + ], + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: [ + dataclasses.asdict(output_message) for output_message in output_messages + ], } if system_instructions: - 
attributes["gen_ai.system.instructions"] = [dataclasses.asdict(sys_instr) for sys_instr in system_instructions] + attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS] = [ + dataclasses.asdict(sys_instr) for sys_instr in system_instructions + ] if as_str: return {k: json.dumps(v) for k, v in attributes.items()} @@ -455,15 +464,16 @@ def _maybe_log_completion_details( input_messages = to_input_messages(contents=transformers.t_contents(request)) output_messages = to_output_messages(candidates=response.candidates or []) - - span = None if self._content_recording_enabled in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: completion_details_attributes = _create_completion_details_attributes( - input_messages, output_messages, system_instructions, as_str=True, + input_messages, + output_messages, + system_instructions, + as_str=True, ) span = trace.get_current_span() span.set_attributes(completion_details_attributes) @@ -472,10 +482,14 @@ def _maybe_log_completion_details( ContentCapturingMode.SPAN_AND_EVENT, ]: completion_details_attributes = _create_completion_details_attributes( - input_messages, output_messages, system_instructions, + input_messages, + output_messages, + system_instructions, ) attributes.update(completion_details_attributes) - event = Event(name="gen_ai.client.inference.operation.details", attributes=attributes) + event = Event( + name="gen_ai.client.inference.operation.details", attributes=attributes + ) hook = load_upload_hook() hook.upload( inputs=input_messages, From ee911f570cf067f775ccda6ad4d697d2f54ca3b8 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 13:28:45 +0000 Subject: [PATCH 06/34] refactor: Inject upload_hook in Instrumentor. 
--- .../google_genai/generate_content.py | 21 +++++++++++++++---- .../google_genai/instrumentor.py | 3 +++ 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index a786abf2b9..73c178c855 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -54,7 +54,7 @@ InputMessage, OutputMessage, ) -from opentelemetry.util.genai.upload_hook import load_upload_hook +from opentelemetry.util.genai.upload_hook import UploadHook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG @@ -290,12 +290,14 @@ def __init__( models_object: Union[Models, AsyncModels], otel_wrapper: OTelWrapper, model: str, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): self._start_time = time.time_ns() self._otel_wrapper = otel_wrapper self._genai_system = _determine_genai_system(models_object) self._genai_request_model = model + self.upload_hook = upload_hook self._finish_reasons_set = set() self._error_type = None self._input_tokens = 0 @@ -490,15 +492,13 @@ def _maybe_log_completion_details( event = Event( name="gen_ai.client.inference.operation.details", attributes=attributes ) - hook = load_upload_hook() - hook.upload( + self.upload_hook.upload( inputs=input_messages, outputs=output_messages, system_instruction=system_instructions, span=span, log_record=event, ) - hook.shutdown() self._otel_wrapper.log_completion_details(event=event) def _maybe_log_system_instruction( @@ -689,6 +689,7 @@ def 
_record_duration_metric(self): def _create_instrumented_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content @@ -706,6 +707,7 @@ def instrumented_generate_content( self, otel_wrapper, model, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -746,6 +748,7 @@ def instrumented_generate_content( def _create_instrumented_generate_content_stream( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content_stream @@ -763,6 +766,7 @@ def instrumented_generate_content_stream( self, otel_wrapper, model, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -803,6 +807,7 @@ def instrumented_generate_content_stream( def _create_instrumented_async_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.async_generate_content @@ -820,6 +825,7 @@ async def instrumented_generate_content( self, otel_wrapper, model, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -861,6 +867,7 @@ async def instrumented_generate_content( def _create_instrumented_async_generate_content_stream( # type: ignore snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.async_generate_content_stream @@ -878,6 +885,7 @@ async def instrumented_generate_content_stream( self, otel_wrapper, model, + upload_hook, 
generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -936,28 +944,33 @@ def uninstrument_generate_content(snapshot: object): def instrument_generate_content( otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ) -> object: snapshot = _MethodsSnapshot() Models.generate_content = _create_instrumented_generate_content( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) Models.generate_content_stream = _create_instrumented_generate_content_stream( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content = _create_instrumented_async_generate_content( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content_stream = ( _create_instrumented_async_generate_content_stream( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py index 8a3f792651..5983b18b5a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py @@ -18,6 +18,7 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider +from opentelemetry.util.genai.upload_hook import 
load_upload_hook from .allowlist_util import AllowList from .generate_content import ( @@ -58,8 +59,10 @@ def _instrument(self, **kwargs: Any): event_logger_provider=event_logger_provider, meter_provider=meter_provider, ) + upload_hook = kwargs.get("upload_hook") or load_upload_hook() self._generate_content_snapshot = instrument_generate_content( otel_wrapper, + upload_hook, generate_content_config_key_allowlist=self._generate_content_config_key_allowlist, ) From 8583327e8c785c8453c6c6d9472f000f3ec84d4a Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Mon, 29 Sep 2025 09:23:16 +0000 Subject: [PATCH 07/34] refactor: rename upload hook to completion hook. --- .../google_genai/generate_content.py | 34 +++++++++---------- .../google_genai/instrumentor.py | 6 ++-- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 73c178c855..971a0670cc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -54,7 +54,7 @@ InputMessage, OutputMessage, ) -from opentelemetry.util.genai.upload_hook import UploadHook +from opentelemetry.util.genai.completion_hook import CompletionHook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG @@ -290,14 +290,14 @@ def __init__( models_object: Union[Models, AsyncModels], otel_wrapper: OTelWrapper, model: str, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): self._start_time = time.time_ns() self._otel_wrapper = 
otel_wrapper self._genai_system = _determine_genai_system(models_object) self._genai_request_model = model - self.upload_hook = upload_hook + self.completion_hook = completion_hook self._finish_reasons_set = set() self._error_type = None self._input_tokens = 0 @@ -492,7 +492,7 @@ def _maybe_log_completion_details( event = Event( name="gen_ai.client.inference.operation.details", attributes=attributes ) - self.upload_hook.upload( + self.completion_hook.on_completion( inputs=input_messages, outputs=output_messages, system_instruction=system_instructions, @@ -689,7 +689,7 @@ def _record_duration_metric(self): def _create_instrumented_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content @@ -707,7 +707,7 @@ def instrumented_generate_content( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -748,7 +748,7 @@ def instrumented_generate_content( def _create_instrumented_generate_content_stream( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content_stream @@ -766,7 +766,7 @@ def instrumented_generate_content_stream( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -807,7 +807,7 @@ def instrumented_generate_content_stream( def _create_instrumented_async_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): 
wrapped_func = snapshot.async_generate_content @@ -825,7 +825,7 @@ async def instrumented_generate_content( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -867,7 +867,7 @@ async def instrumented_generate_content( def _create_instrumented_async_generate_content_stream( # type: ignore snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.async_generate_content_stream @@ -885,7 +885,7 @@ async def instrumented_generate_content_stream( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -944,33 +944,33 @@ def uninstrument_generate_content(snapshot: object): def instrument_generate_content( otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ) -> object: snapshot = _MethodsSnapshot() Models.generate_content = _create_instrumented_generate_content( snapshot, otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) Models.generate_content_stream = _create_instrumented_generate_content_stream( snapshot, otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content = _create_instrumented_async_generate_content( snapshot, otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content_stream = ( _create_instrumented_async_generate_content_stream( snapshot, otel_wrapper, - upload_hook, + completion_hook, 
generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py index 5983b18b5a..c1110663ff 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py @@ -18,7 +18,7 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider -from opentelemetry.util.genai.upload_hook import load_upload_hook +from opentelemetry.util.genai.completion_hook import load_completion_hook from .allowlist_util import AllowList from .generate_content import ( @@ -59,10 +59,10 @@ def _instrument(self, **kwargs: Any): event_logger_provider=event_logger_provider, meter_provider=meter_provider, ) - upload_hook = kwargs.get("upload_hook") or load_upload_hook() + completion_hook = kwargs.get("completion_hook") or load_completion_hook() self._generate_content_snapshot = instrument_generate_content( otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=self._generate_content_config_key_allowlist, ) From 47d57289e0d04fb8d006238b4c5bcaaba0987aae Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Tue, 30 Sep 2025 16:07:28 +0000 Subject: [PATCH 08/34] test: add tests for non streaming case and tool_call_wrapper. 
--- .../generate_content/nonstreaming_base.py | 68 +++++++++++++------ .../tests/requirements.latest.txt | 5 +- .../tests/utils/test_tool_call_wrapper.py | 46 +++++++++++-- 3 files changed, 91 insertions(+), 28 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 39f1dfe927..7b79c40594 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -13,8 +13,15 @@ # limitations under the License. import json -import os import unittest +from unittest.mock import patch + +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, +) +from opentelemetry.util.genai.types import ContentCapturingMode from .base import TestCase @@ -111,10 +118,8 @@ def test_generated_span_counts_tokens(self): self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123) self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456) + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_records_system_prompt_as_log(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) config = {"system_instruction": "foo"} self.configure_valid_response() self.generate_content( @@ -125,10 +130,8 @@ def test_records_system_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "foo") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): - 
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "false" - ) config = {"system_instruction": "foo"} self.configure_valid_response() self.generate_content( @@ -139,20 +142,16 @@ def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present( self, ): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_does_not_have_event_named("gen_ai.system.message") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_records_user_prompt_as_log(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.user.message") @@ -160,10 +159,8 @@ def test_records_user_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "Some input") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "false" - ) self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.user.message") @@ -171,10 +168,8 @@ def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") 
self.assertEqual(event_record.body["content"], "") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_records_response_as_log(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.choice") @@ -184,10 +179,8 @@ def test_records_response_as_log(self): "Some response content", json.dumps(event_record.body["content"]) ) + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) def test_does_not_record_response_as_log_if_disabled_by_env(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "false" - ) self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.choice") @@ -195,6 +188,37 @@ def test_does_not_record_response_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") + def test_new_semconv_record_response_as_log(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + self.configure_valid_response(text="Some response content") + self.generate_content(model="gemini-2.0-flash", contents="Some input") + + if mode in [ + 
ContentCapturingMode.NO_CONTENT, + ContentCapturingMode.SPAN_ONLY, + ]: + self.otel.assert_does_not_have_event_named("gen_ai.client.inference.operation.details") + else: + self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") + + self.tearDown() + def test_records_metrics_data(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt index 32cf3422f5..3ee7b91536 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt @@ -41,9 +41,10 @@ pytest-asyncio==0.21.0 pytest-vcr==1.0.2 google-auth==2.38.0 -google-genai==1.0.0 +google-genai==1.32.0 # Install locally from the folder. This path is relative to the # root directory, given invocation from "tox" at root level. 
-e opentelemetry-instrumentation --e instrumentation-genai/opentelemetry-instrumentation-google-genai \ No newline at end of file +-e instrumentation-genai/opentelemetry-instrumentation-google-genai +-e util/opentelemetry-util-genai diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py index 3c8aee3f70..61d4799913 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py @@ -17,14 +17,16 @@ from unittest.mock import patch from google.genai import types as genai_types - from opentelemetry._events import get_event_logger_provider -from opentelemetry.instrumentation.google_genai import ( - otel_wrapper, - tool_call_wrapper, +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, ) +from opentelemetry.instrumentation.google_genai import otel_wrapper, tool_call_wrapper from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider +from opentelemetry.util.genai.types import ContentCapturingMode from ..common import otel_mocker @@ -278,3 +280,39 @@ def somefunction(arg=None): span.attributes["code.function.parameters.arg.value"], '[123, "abc"]', ) + + def test_handle_with_new_sem_conv(self): + def somefunction(arg=None): + pass + + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + 
_OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + wrapped_somefunction = self.wrap(somefunction) + wrapped_somefunction(12345) + + span = self.otel.get_span_named("execute_tool somefunction") + + print(mode) + if mode in [ + ContentCapturingMode.NO_CONTENT, + ContentCapturingMode.EVENT_ONLY, + ]: + self.assertNotIn("code.function.parameters.arg.value", span.attributes) + else: + self.assertIn("code.function.parameters.arg.value", span.attributes) + self.tearDown() From b3a6efba26f6899fa287cb2acbedae8ca850eed3 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Tue, 30 Sep 2025 17:35:18 +0000 Subject: [PATCH 09/34] test: add tool call instrumentation tests and nonstreaming recording in spans test. --- .../generate_content/nonstreaming_base.py | 40 +++++- .../test_tool_call_instrumentation.py | 129 ++++++++++++++++++ 2 files changed, 167 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 7b79c40594..8b919ffe62 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -16,6 +16,7 @@ import unittest from unittest.mock import patch +from google.genai.types import GenerateContentConfig from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, @@ -188,7 +189,7 @@ def test_does_not_record_response_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") 
self.assertEqual(event_record.body["content"], "") - def test_new_semconv_record_response_as_log(self): + def test_new_semconv_record_completion_as_log(self): for mode in ContentCapturingMode: patched_environ = patch.dict( "os.environ", @@ -218,7 +219,42 @@ def test_new_semconv_record_response_as_log(self): self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") self.tearDown() - + + def test_new_semconv_record_completion_in_span(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + self.configure_valid_response(text="Some response content") + self.generate_content(model="gemini-2.0-flash", contents="Some input", config=GenerateContentConfig(system_instruction="System instruction")) + span = self.otel.get_span_named("generate_content gemini-2.0-flash") + if mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + self.assertEqual(span.attributes["gen_ai.input.messages"], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') + self.assertEqual(span.attributes["gen_ai.output.messages"], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') + self.assertEqual(span.attributes["gen_ai.system_instructions"], '[{"content": "System instruction", "type": "text"}]') + else: + self.assertNotIn("gen_ai.input.messages", span.attributes) + self.assertNotIn("gen_ai.output.messages", span.attributes) + 
self.assertNotIn("gen_ai.system_instructions", span.attributes) + + self.tearDown() + def test_records_metrics_data(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py index 7e06422812..7439944e37 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py @@ -15,6 +15,12 @@ from unittest.mock import patch import google.genai.types as genai_types +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, +) +from opentelemetry.util.genai.types import ContentCapturingMode from .base import TestCase @@ -275,3 +281,126 @@ def somefunction(x, y=2): self.assertNotIn( "code.function.return.value", generated_span.attributes ) + + def test_new_semconv_tool_calls_record_parameter_values(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + calls = [] + + def handle(*args, **kwargs): + calls.append((args, kwargs)) + return "some result" + + def 
somefunction(someparam, otherparam=2): + print("someparam=%s, otherparam=%s", someparam, otherparam) + + self.mock_generate_content.side_effect = handle + self.client.models.generate_content( + model="some-model-name", + contents="Some content", + config={ + "tools": [somefunction], + }, + ) + self.assertEqual(len(calls), 1) + config = calls[0][1]["config"] + tools = config.tools + wrapped_somefunction = tools[0] + wrapped_somefunction(123, otherparam="abc") + self.otel.assert_has_span_named("execute_tool somefunction") + generated_span = self.otel.get_span_named("execute_tool somefunction") + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.someparam.type" + ], + "int", + ) + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.otherparam.type" + ], + "str", + ) + if mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + self.assertEqual(generated_span.attributes["code.function.parameters.someparam.value"], 123) + self.assertEqual(generated_span.attributes["code.function.parameters.otherparam.value"], "abc") + else: + self.assertNotIn("code.function.parameters.someparam.value", generated_span.attributes) + self.assertNotIn("code.function.parameters.otherparam.value", generated_span.attributes) + self.tearDown() + + def test_new_semconv_tool_calls_record_return_values(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + calls = [] + + def handle(*args, 
**kwargs): + calls.append((args, kwargs)) + return "some result" + + def somefunction(x, y=2): + return x + y + + self.mock_generate_content.side_effect = handle + self.client.models.generate_content( + model="some-model-name", + contents="Some content", + config={ + "tools": [somefunction], + }, + ) + self.assertEqual(len(calls), 1) + config = calls[0][1]["config"] + tools = config.tools + wrapped_somefunction = tools[0] + wrapped_somefunction(123) + self.otel.assert_has_span_named("execute_tool somefunction") + generated_span = self.otel.get_span_named("execute_tool somefunction") + self.assertEqual( + generated_span.attributes["code.function.return.type"], "int" + ) + if mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + self.assertIn( + "code.function.return.value", generated_span.attributes + ) + else: + self.assertNotIn( + "code.function.return.value", generated_span.attributes + ) + self.tearDown() From 2eefc39b039b55a55a0768d1ca7e489e3417bfae Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 1 Oct 2025 12:39:36 +0000 Subject: [PATCH 10/34] fix: remove print --- .../tests/utils/test_tool_call_wrapper.py | 1 - 1 file changed, 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py index 61d4799913..6bfa53cf5e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py @@ -307,7 +307,6 @@ def somefunction(arg=None): span = self.otel.get_span_named("execute_tool somefunction") - print(mode) if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.EVENT_ONLY, From 166c08188229dc598efba71710a52ec94966ec34 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 1 
Oct 2025 14:18:14 +0000 Subject: [PATCH 11/34] feature: add blobpart and filepart to message handling --- .../instrumentation/google_genai/message.py | 12 ++++++++++++ .../src/opentelemetry/util/genai/types.py | 17 +++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index 760589d1f8..d888efb4a7 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -19,6 +19,8 @@ from google.genai import types as genai_types from opentelemetry.util.genai.types import ( + BlobPart, + FileDataPart, FinishReason, InputMessage, MessagePart, @@ -95,6 +97,16 @@ def tool_call_id(name: str | None) -> str: if (text := part.text) is not None: return Text(content=text) + if data := part.inline_data: + return BlobPart( + mime_type=data.mime_type or "", data=data.data or b"" + ) + + if data := part.file_data: + return FileDataPart( + mime_type=data.mime_type or "", uri=data.file_uri or "" + ) + if call := part.function_call: return ToolCall( id=call.id or tool_call_id(call.name), diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py index 569e7e7e00..4a3952d16e 100644 --- a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py @@ -55,6 +55,23 @@ class Text: type: Literal["text"] = "text" +@dataclass +class BlobPart: + data: bytes + mime_type: str + type: Literal["blob"] = "blob" + + +@dataclass +class FileDataPart: + mime_type: str + uri: str + 
type: Literal["file_data"] = "file_data" + + class Config: + extra = "allow" + + MessagePart = Union[Text, ToolCall, ToolCallResponse, Any] From ae4da6428ebeafb84cb77ddf7c78f9581f6d4cfa Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 11:06:49 +0000 Subject: [PATCH 12/34] fix: encode bytes as base64 when dumping to json string. --- .../instrumentation/google_genai/generate_content.py | 3 ++- .../instrumentation/google_genai/message.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 971a0670cc..07cdd0d439 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -61,6 +61,7 @@ from .dict_util import flatten_dict from .flags import is_content_recording_enabled from .message import ( + Base64JsonEncoder, to_input_messages, to_output_messages, to_system_instructions, @@ -279,7 +280,7 @@ def _create_completion_details_attributes( ] if as_str: - return {k: json.dumps(v) for k, v in attributes.items()} + return {k: json.dumps(v, cls=Base64JsonEncoder) for k, v in attributes.items()} return attributes diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index d888efb4a7..ff94d864e4 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -14,8 +14,11 @@ from __future__ import annotations +from base64 import b64encode +import json import logging from enum import Enum +from typing import Any from google.genai import types as genai_types from opentelemetry.util.genai.types import ( @@ -38,6 +41,13 @@ class Role(str, Enum): TOOL = "tool" +class Base64JsonEncoder(json.JSONEncoder): + def default(self, o: Any) -> Any: + if isinstance(o, bytes): + return b64encode(o).decode() + return super().default(o) + + _logger = logging.getLogger(__name__) From 13b6f2ea7a6089a063aac8f6c6538eddfc1154cf Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 13:12:59 +0000 Subject: [PATCH 13/34] fix: always call completion hook, independently of recording settings. --- .../google_genai/generate_content.py | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 07cdd0d439..185a9d36fc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -467,7 +467,17 @@ def _maybe_log_completion_details( input_messages = to_input_messages(contents=transformers.t_contents(request)) output_messages = to_output_messages(candidates=response.candidates or []) - span = None + span = trace.get_current_span() + event = Event( + name="gen_ai.client.inference.operation.details", attributes=attributes + ) + self.completion_hook.on_completion( + inputs=input_messages, + 
outputs=output_messages, + system_instruction=system_instructions, + span=span, + log_record=event, + ) if self._content_recording_enabled in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, @@ -478,7 +488,6 @@ def _maybe_log_completion_details( system_instructions, as_str=True, ) - span = trace.get_current_span() span.set_attributes(completion_details_attributes) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, @@ -489,18 +498,11 @@ def _maybe_log_completion_details( output_messages, system_instructions, ) - attributes.update(completion_details_attributes) - event = Event( - name="gen_ai.client.inference.operation.details", attributes=attributes - ) - self.completion_hook.on_completion( - inputs=input_messages, - outputs=output_messages, - system_instruction=system_instructions, - span=span, - log_record=event, - ) - self._otel_wrapper.log_completion_details(event=event) + event.attributes = { + **(event.attributes or {}), + **completion_details_attributes, + } + self._otel_wrapper.log_completion_details(event=event) def _maybe_log_system_instruction( self, config: Optional[GenerateContentConfigOrDict] = None From 058da13430eaa2566075cb621b7533cdacbf6007 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 13:23:19 +0000 Subject: [PATCH 14/34] test: update requirements for instrumentation-google-genai oldest env --- .../tests/requirements.oldest.txt | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt index 50fc45f39f..19f08bfecc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt @@ -21,11 +21,12 @@ pytest-vcr==1.0.2 google-auth==2.15.0 
google-genai==1.0.0 -opentelemetry-api==1.31.1 -opentelemetry-sdk==1.31.1 -opentelemetry-semantic-conventions==0.52b1 -opentelemetry-instrumentation==0.52b1 +opentelemetry-api==1.37.0 +opentelemetry-sdk==1.37.0 +opentelemetry-semantic-conventions==0.58b0 +opentelemetry-instrumentation==0.58b0 # Install locally from the folder. This path is relative to the # root directory, given invocation from "tox" at root level. -e instrumentation-genai/opentelemetry-instrumentation-google-genai +-e util/opentelemetry-util-genai From 17b9c0aa4e5aeda13711bf7ca9d89a14dba04260 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 14:33:16 +0000 Subject: [PATCH 15/34] test: bump google-genai lib version in -oldest test env. --- .../tests/requirements.oldest.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt index 19f08bfecc..2228363418 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt @@ -20,7 +20,7 @@ pytest-asyncio==0.21.0 pytest-vcr==1.0.2 google-auth==2.15.0 -google-genai==1.0.0 +google-genai==1.32.0 opentelemetry-api==1.37.0 opentelemetry-sdk==1.37.0 opentelemetry-semantic-conventions==0.58b0 From 057d956206c426b8c915fdfd1581248629c6de29 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Fri, 3 Oct 2025 12:08:02 +0000 Subject: [PATCH 16/34] test: fix event recording test. 
--- .../generate_content/nonstreaming_base.py | 43 +++++++++++++------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 8b919ffe62..039d077b72 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -17,11 +17,15 @@ from unittest.mock import patch from google.genai.types import GenerateContentConfig +from opentelemetry._events import Event from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, _StabilityMode, ) +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes, +) from opentelemetry.util.genai.types import ContentCapturingMode from .base import TestCase @@ -204,20 +208,33 @@ def test_new_semconv_record_completion_as_log(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + content = "Some input" + output = "Some response content" + sys_instr = "System instruction" + with self.subTest(f"mode: {mode}", patched_environ=patched_environ): self.setUp() with patched_environ, patched_otel_mapping: - self.configure_valid_response(text="Some response content") - self.generate_content(model="gemini-2.0-flash", contents="Some input") - + self.configure_valid_response(text=output) + self.generate_content(model="gemini-2.0-flash", contents=content, config=GenerateContentConfig(system_instruction=sys_instr)) + self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") + event = 
self.otel.get_event_named("gen_ai.client.inference.operation.details") if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.SPAN_ONLY, ]: - self.otel.assert_does_not_have_event_named("gen_ai.client.inference.operation.details") + self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, event.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, event.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, event.attributes) else: - self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") - + attrs = { + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: ({"role": "user", "parts": ({"content": content, "type": "text"},)},), + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: ({"role": "assistant", "parts": ({"content": output, "type": "text"},), "finish_reason": ""},), + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: ({"content": sys_instr, "type": "text"},) + } + expected_event = Event("gen_ai.client.inference.operation.details", attributes=attrs) + self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES]) + self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES]) + self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], expected_event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS]) self.tearDown() def test_new_semconv_record_completion_in_span(self): @@ -245,13 +262,13 @@ def test_new_semconv_record_completion_in_span(self): ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - self.assertEqual(span.attributes["gen_ai.input.messages"], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') - self.assertEqual(span.attributes["gen_ai.output.messages"], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') - 
self.assertEqual(span.attributes["gen_ai.system_instructions"], '[{"content": "System instruction", "type": "text"}]') + self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') + self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') + self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], '[{"content": "System instruction", "type": "text"}]') else: - self.assertNotIn("gen_ai.input.messages", span.attributes) - self.assertNotIn("gen_ai.output.messages", span.attributes) - self.assertNotIn("gen_ai.system_instructions", span.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, span.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, span.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, span.attributes) self.tearDown() From 486796a98dff23f53ec635946fea1f55dcd1337d Mon Sep 17 00:00:00 2001 From: Dylan Russell Date: Mon, 6 Oct 2025 14:22:03 +0000 Subject: [PATCH 17/34] Update FakeCredentials --- .../tests/common/auth.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py index 88831a3e9a..f46de7ca3a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py @@ -16,5 +16,8 @@ class FakeCredentials(google.auth.credentials.AnonymousCredentials): + def __init__(self): + self.token = 'a' + self._quota_project_id = 'a' def refresh(self, request): pass From 205aa8873c1dbad4ee76af2c505608ef31260c40 Mon Sep 17 00:00:00 2001 From: Dylan Russell Date: Mon, 6 Oct 
2025 15:53:17 +0000 Subject: [PATCH 18/34] Fix tests --- .../google_genai/generate_content.py | 90 ++++++--- .../google_genai/instrumentor.py | 4 +- .../instrumentation/google_genai/message.py | 19 +- .../google_genai/otel_wrapper.py | 21 ++- .../google_genai/tool_call_wrapper.py | 41 ++-- .../tests/common/auth.py | 5 +- ...mini-1.5-flash-002-vertexaiapi-async].yaml | 94 --------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 94 --------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 ++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 102 ++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 94 --------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 94 --------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 ++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 102 ++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 97 ---------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 102 ---------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 ++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 109 +++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 99 ---------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 99 ---------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 123 ++++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 109 +++++++++++ .../generate_content/nonstreaming_base.py | 178 +++++++++++++++--- .../tests/generate_content/test_e2e.py | 14 +- .../test_tool_call_instrumentation.py | 64 +++++-- .../tests/utils/test_tool_call_wrapper.py | 24 ++- 26 files changed, 1200 insertions(+), 884 deletions(-) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 
100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml delete mode 100644 
instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 185a9d36fc..60d6136e4e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -35,6 +35,7 @@ GenerateContentConfigOrDict, GenerateContentResponse, ) + from opentelemetry import trace from opentelemetry._events import Event from opentelemetry.instrumentation._semconv import ( @@ -48,13 +49,13 @@ ) from opentelemetry.semconv.attributes import error_attributes from opentelemetry.trace.span import Span +from opentelemetry.util.genai.completion_hook import CompletionHook from opentelemetry.util.genai.types import ( ContentCapturingMode, - MessagePart, InputMessage, + MessagePart, OutputMessage, ) -from opentelemetry.util.genai.completion_hook import CompletionHook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG 
@@ -165,7 +166,9 @@ def _to_dict(value: object): def _add_request_options_to_span( - span: Span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList + span: Span, + config: Optional[GenerateContentConfigOrDict], + allow_list: AllowList, ): if config is None: return @@ -209,7 +212,9 @@ def _add_request_options_to_span( }, ) for key, value in attributes.items(): - if key.startswith(GCP_GENAI_OPERATION_CONFIG) and not allow_list.allowed(key): + if key.startswith( + GCP_GENAI_OPERATION_CONFIG + ) and not allow_list.allowed(key): # The allowlist is used to control inclusion of the dynamic keys. continue span.set_attribute(key, value) @@ -245,7 +250,9 @@ def _wrapped_config_with_tools( if not config.tools: return config result = copy.copy(config) - result.tools = [wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools] + result.tools = [ + wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools + ] return result @@ -268,10 +275,12 @@ def _create_completion_details_attributes( ) -> dict[str, Any]: attributes: dict[str, Any] = { gen_ai_attributes.GEN_AI_INPUT_MESSAGES: [ - dataclasses.asdict(input_message) for input_message in input_messages + dataclasses.asdict(input_message) + for input_message in input_messages ], gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: [ - dataclasses.asdict(output_message) for output_message in output_messages + dataclasses.asdict(output_message) + for output_message in output_messages ], } if system_instructions: @@ -280,7 +289,10 @@ def _create_completion_details_attributes( ] if as_str: - return {k: json.dumps(v, cls=Base64JsonEncoder) for k, v in attributes.items()} + return { + k: json.dumps(v, cls=Base64JsonEncoder) + for k, v in attributes.items() + } return attributes @@ -464,12 +476,17 @@ def _maybe_log_completion_details( system_instructions = to_system_instructions( content=transformers.t_contents(system_content)[0] ) - input_messages = to_input_messages(contents=transformers.t_contents(request)) 
- output_messages = to_output_messages(candidates=response.candidates or []) + input_messages = to_input_messages( + contents=transformers.t_contents(request) + ) + output_messages = to_output_messages( + candidates=response.candidates or [] + ) span = trace.get_current_span() event = Event( - name="gen_ai.client.inference.operation.details", attributes=attributes + name="gen_ai.client.inference.operation.details", + attributes=attributes, ) self.completion_hook.on_completion( inputs=input_messages, @@ -482,21 +499,25 @@ def _maybe_log_completion_details( ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - completion_details_attributes = _create_completion_details_attributes( - input_messages, - output_messages, - system_instructions, - as_str=True, + completion_details_attributes = ( + _create_completion_details_attributes( + input_messages, + output_messages, + system_instructions, + as_str=True, + ) ) span.set_attributes(completion_details_attributes) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - completion_details_attributes = _create_completion_details_attributes( - input_messages, - output_messages, - system_instructions, + completion_details_attributes = ( + _create_completion_details_attributes( + input_messages, + output_messages, + system_instructions, + ) ) event.attributes = { **(event.attributes or {}), @@ -541,7 +562,9 @@ def _maybe_log_user_prompt( total = len(contents) index = 0 for entry in contents: - self._maybe_log_single_user_prompt(entry, index=index, total=total) + self._maybe_log_single_user_prompt( + entry, index=index, total=total + ) index += 1 else: self._maybe_log_single_user_prompt(contents) @@ -647,7 +670,9 @@ def _maybe_log_response_stats(self, response: GenerateContentResponse): # pass - def _maybe_log_response_safety_ratings(self, response: GenerateContentResponse): + def _maybe_log_response_safety_ratings( + self, response: 
GenerateContentResponse + ): # TODO: Determine if there is a way that we can log # the "prompt_feedback". This would be especially useful # in the case where the response is blocked. @@ -917,13 +942,18 @@ async def _response_async_generator_wrapper(): with trace.use_span(span, end_on_exit=True): try: async for response in response_async_generator: - if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + if ( + helper.sem_conv_opt_in_mode + == _StabilityMode.DEFAULT + ): helper.process_response(response) elif ( helper.sem_conv_opt_in_mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL ): - helper.process_completion(contents, response, config) + helper.process_completion( + contents, response, config + ) else: raise ValueError( f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." @@ -969,12 +999,10 @@ def instrument_generate_content( completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) - AsyncModels.generate_content_stream = ( - _create_instrumented_async_generate_content_stream( - snapshot, - otel_wrapper, - completion_hook, - generate_content_config_key_allowlist=generate_content_config_key_allowlist, - ) + AsyncModels.generate_content_stream = _create_instrumented_async_generate_content_stream( + snapshot, + otel_wrapper, + completion_hook, + generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) return snapshot diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py index c1110663ff..ed7d4638a3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py @@ 
-59,7 +59,9 @@ def _instrument(self, **kwargs: Any): event_logger_provider=event_logger_provider, meter_provider=meter_provider, ) - completion_hook = kwargs.get("completion_hook") or load_completion_hook() + completion_hook = ( + kwargs.get("completion_hook") or load_completion_hook() + ) self._generate_content_snapshot = instrument_generate_content( otel_wrapper, completion_hook, diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index ff94d864e4..6a515163b6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -14,13 +14,14 @@ from __future__ import annotations -from base64 import b64encode import json import logging +from base64 import b64encode from enum import Enum from typing import Any from google.genai import types as genai_types + from opentelemetry.util.genai.types import ( BlobPart, FileDataPart, @@ -75,7 +76,9 @@ def content_to_output_message( parts=message.parts, ) - messages = (content_to_output_message(candidate) for candidate in candidates) + messages = ( + content_to_output_message(candidate) for candidate in candidates + ) return [message for message in messages if message is not None] @@ -83,14 +86,18 @@ def to_system_instructions( *, content: genai_types.Content, ) -> list[MessagePart]: - parts = (_to_part(part, idx) for idx, part in enumerate(content.parts or [])) + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) return [part for part in parts if part is not None] def _to_input_message( content: genai_types.Content, ) -> InputMessage: - parts = (_to_part(part, idx) for idx, part in 
enumerate(content.parts or [])) + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) return InputMessage( role=_to_role(content.role), # filter Nones @@ -108,9 +115,7 @@ def tool_call_id(name: str | None) -> str: return Text(content=text) if data := part.inline_data: - return BlobPart( - mime_type=data.mime_type or "", data=data.data or b"" - ) + return BlobPart(mime_type=data.mime_type or "", data=data.data or b"") if data := part.file_data: return FileDataPart( diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py index 3d6a5a41a2..675c84b855 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py @@ -17,6 +17,7 @@ from typing import Any import google.genai + from opentelemetry._events import Event, EventLogger, EventLoggerProvider from opentelemetry.metrics import Meter, MeterProvider from opentelemetry.semconv._incubating.metrics import gen_ai_metrics @@ -39,15 +40,17 @@ class OTelWrapper: - def __init__(self, tracer: Tracer, event_logger: EventLogger, meter: Meter): + def __init__( + self, tracer: Tracer, event_logger: EventLogger, meter: Meter + ): self._tracer = tracer self._event_logger = event_logger self._meter = meter self._operation_duration_metric = ( gen_ai_metrics.create_gen_ai_client_operation_duration(meter) ) - self._token_usage_metric = gen_ai_metrics.create_gen_ai_client_token_usage( - meter + self._token_usage_metric = ( + gen_ai_metrics.create_gen_ai_client_token_usage(meter) ) @staticmethod @@ -79,17 +82,23 @@ def operation_duration_metric(self): def token_usage_metric(self): return 
self._token_usage_metric - def log_system_prompt(self, attributes: dict[str, str], body: dict[str, Any]): + def log_system_prompt( + self, attributes: dict[str, str], body: dict[str, Any] + ): _logger.debug("Recording system prompt.") event_name = "gen_ai.system.message" self._log_event(event_name, attributes, body) - def log_user_prompt(self, attributes: dict[str, str], body: dict[str, Any]): + def log_user_prompt( + self, attributes: dict[str, str], body: dict[str, Any] + ): _logger.debug("Recording user prompt.") event_name = "gen_ai.user.message" self._log_event(event_name, attributes, body) - def log_response_content(self, attributes: dict[str, str], body: dict[str, Any]): + def log_response_content( + self, attributes: dict[str, str], body: dict[str, Any] + ): _logger.debug("Recording response.") event_name = "gen_ai.choice" self._log_event(event_name, attributes, body) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py index 80d83ea3ae..f4303306e3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py @@ -24,15 +24,14 @@ ) from opentelemetry import trace -from opentelemetry.semconv._incubating.attributes import ( - code_attributes, -) from opentelemetry.instrumentation._semconv import ( - _StabilityMode, _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, + _StabilityMode, +) +from opentelemetry.semconv._incubating.attributes import ( + code_attributes, ) - from opentelemetry.util.genai.types import ContentCapturingMode from .flags import is_content_recording_enabled @@ -52,7 +51,9 
@@ def _to_otel_value(python_value): if isinstance(python_value, list): return [_to_otel_value(x) for x in python_value] if isinstance(python_value, dict): - return {key: _to_otel_value(val) for (key, val) in python_value.items()} + return { + key: _to_otel_value(val) for (key, val) in python_value.items() + } if hasattr(python_value, "model_dump"): return python_value.model_dump() if hasattr(python_value, "__dict__"): @@ -120,7 +121,9 @@ def _create_function_span_attributes( return result -def _record_function_call_argument(span, param_name, param_value, include_values): +def _record_function_call_argument( + span, param_name, param_value, include_values +): attribute_prefix = f"code.function.parameters.{param_name}" type_attribute = f"{attribute_prefix}.type" span.set_attribute(type_attribute, type(param_value).__name__) @@ -152,7 +155,9 @@ def _record_function_call_result(otel_wrapper, wrapped_function, result): span = trace.get_current_span() span.set_attribute("code.function.return.type", type(result).__name__) if include_values: - span.set_attribute("code.function.return.value", _to_otel_attribute(result)) + span.set_attribute( + "code.function.return.value", _to_otel_attribute(result) + ) def _wrap_sync_tool_function( @@ -167,8 +172,12 @@ def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, extra_span_attributes ) - with otel_wrapper.start_as_current_span(span_name, attributes=attributes): - _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) + with otel_wrapper.start_as_current_span( + span_name, attributes=attributes + ): + _record_function_call_arguments( + otel_wrapper, tool_function, args, kwargs + ) result = tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -188,8 +197,12 @@ async def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, 
extra_span_attributes ) - with otel_wrapper.start_as_current_span(span_name, attributes=attributes): - _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) + with otel_wrapper.start_as_current_span( + span_name, attributes=attributes + ): + _record_function_call_arguments( + otel_wrapper, tool_function, args, kwargs + ) result = await tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -215,7 +228,9 @@ def wrapped( if tool_or_tools is None: return None if isinstance(tool_or_tools, list): - return [wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools] + return [ + wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools + ] if isinstance(tool_or_tools, dict): return { key: wrapped(value, otel_wrapper, **kwargs) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py index f46de7ca3a..5719ddf0f8 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py @@ -17,7 +17,8 @@ class FakeCredentials(google.auth.credentials.AnonymousCredentials): def __init__(self): - self.token = 'a' - self._quota_project_id = 'a' + self.token = "a" + self._quota_project_id = "a" + def refresh(self, request): pass diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index c251cc104b..0000000000 --- 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark, inscrutable ways,\nTo trace a request through hazy days.\nOpen Telemetry, a beacon bright,\nIlluminates the path, both day and night.\n\nFrom metrics gathered, a clear display,\nOf latency's dance, and errors' sway.\nTraces unwind, a silken thread,\nShowing the journey, from start to head.\n\nLogs interweave, a richer hue,\nContextual clues, for me and you.\nNo vendor lock-in, a freedom's call,\nTo choose your tools, to stand up tall.\n\nExporters aplenty, a varied choice,\nTo send your data, amplify your voice.\nJaeger, Zipkin, Prometheus' might,\nAll integrate, a glorious sight.\n\nWith spans and attributes, a detailed scene,\nOf how your system works, both sleek and keen.\nPerformance bottlenecks, now laid bare,\nOpen Telemetry, beyond compare.\n\nSo embrace the light, let darkness flee,\nWith Open Telemetry, set your systems free.\nObserve, and learn, and optimize with grace,\nA brighter future, in this digital space.\n" - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -0.3303731600443522 - } - ], - 
"usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 240, - "totalTokenCount": 248, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 240 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:18.083091Z", - "responseId": "5nDLZ5OJBdyY3NoPiZGx0Ag" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 3ae84308bf..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark logs, a cryptic, hidden trace,\nOf failing systems, lost in time and space.\nOpenTelemetry, a beacon shining bright,\nIlluminating paths, both dark and light.\n\nFrom microservices, a sprawling, tangled mesh,\nTo monolithic beasts, put to the test,\nIt gathers traces, spans, and metrics too,\nA holistic view, for me and you.\n\nWith signals clear, from every single node,\nPerformance bottlenecks, instantly bestowed.\nDistributed tracing, paints a vivid scene,\nWhere latency lurks, and slowdowns intervene.\n\nExporters rise, to send the data forth,\nTo dashboards grand, of proven, measured worth.\nPrometheus, Grafana, Jaeger, fluent streams,\nVisualizing insights, fulfilling data dreams.\n\nFrom Jaeger's diagrams, a branching, flowing art,\nTo Grafana's charts, that play a vital part,\nThe mysteries unravel, hidden deep inside,\nWhere errors slumber, and slow responses hide.\n\nSo hail OpenTelemetry, a gift to all who code,\nA brighter future, on a well-lit road.\nNo more guesswork, no more fruitless chase,\nJust clear observability, in time and space.\n" - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -0.45532724261283875 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 256, - "totalTokenCount": 264, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ 
- { - "modality": "TEXT", - "tokenCount": 256 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:15.268428Z", - "responseId": "43DLZ4yxEM6F3NoPzaTkiQU" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..68fe1978e2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In tangled clouds, where microservices roam,\nAnd data streams find a circuitous home,\nA dark abyss, where errors often hide,\nWith mysteries deep, there's nowhere to confide.\n\nBut from the void, a standard takes its flight,\nTo bring the hidden processes to light.\nOpenTelemetry, a unified design,\nA beacon in the digital divine.\n\nWith traces keen, a journey we embark,\nEach operation leaves its vital mark.\nFrom service entry to a database's keep,\nContext propagated, secrets now don't sleep.\n\nThen metrics gleam, in charts and graphs displayed,\nResponse times measured, no detail delayed.\nThrough counters, gauges, histograms so precise,\nPerformance whispered, at a measurable price.\n\nAnd logs, the tales of moments as they pass,\nDiscrete events within the digital glass.\nWith structured wisdom, messages unfold,\nA narrative of what the systems hold.\n\nThrough instrumentation, code begins to speak,\nA common language that all systems seek.\nThe Collector waits, a sentinel so grand,\nTo process, batch, and send across the land.\n\nNo vendor binds, no proprietary chain,\nBut open standards, free from selfish gain.\nA universal tongue, for insight to arise,\nReflecting truth before observing eyes.\n\nFrom scattered signals, patterns now emerge,\nTo quell the chaos, stem the error's surge.\nWith deep 
observability, we understand,\nThe intricate dance across the digital sand.\n\nSo hail this standard, built with thoughtful grace,\nTo light the corners of computing space.\nOpenTelemetry, a steadfast, vital guide,\nWhere understanding and precision ride." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -2.298871321801128 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 349, + "totalTokenCount": 2326, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 349 + } + ], + "thoughtsTokenCount": 1969 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:52.185046Z", + "responseId": "XOXjaNalC4-nnvgPsaPcuAw" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:51:05 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..5f8608e42d --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In realms where code profoundly weaves its might,\nAnd services converse in hurried streams,\nA common tongue emerges, clear and bright,\nTo shed its light upon our digital dreams.\n\nNo longer hidden, lost in labyrinthine ways,\nThe secret life of systems, now revealed,\nThrough OpenTelemetry's enlightened gaze,\nA universal language, fairly wielded.\n\nA **Trace** begins, a journey's golden thread,\nFrom distant client to the deepest core,\nEach tiny \"span,\" a word that has been said,\nMapping the path, from shore to server's shore.\nContext propagated, like a gentle breeze,\nAcross the network, through the busy maze,\nConnecting dots with elegant, calm ease,\nUnveiling time, in swift or lingering phases.\n\nThen **Metrics** pulse, a steady, rhythmic beat,\nThe CPU's hum, the memory's steady climb,\nLatency's dance, both bitter and so sweet,\nA quantitative pulse, defying time.\nCounters tally, gauges hold their ground,\nHistograms paint distributions, clear and bold,\nThe health of systems, constantly profound,\nA silent story, in their numbers told.\n\nAnd finally, the **Logs**, a detailed scroll,\nEach event marked, a narrative precise,\nWhat happened where, beyond our own control,\nA key to debug, a moment's wise advice.\nStructured records, searchable and plain,\nThey whisper errors, or triumphant news,\nA 
chronological, unwavering refrain,\nUnlocking insights, banishing confusions.\n\nFrom humble agents, nestled in the code,\nTo collectors standing, vigilant and keen,\nThe data flows, a precious, heavy load,\nTo any backend, for display and screen.\nVendor-neutral, open, strong, and free,\nIt breaks the silos, tears the walls apart,\nFor true observability's decree,\nTo understand the system's beating heart.\n\nSo hail this standard, built with thoughtful hand,\nFor those who build, and those who seek to mend,\nOpenTelemetry, across the land,\nOur guiding light, until the journey's end." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -2.4816789437603477 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 453, + "totalTokenCount": 2468, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 453 + } + ], + "thoughtsTokenCount": 2007 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:35.682132Z", + "responseId": "S-XjaJTRKdO6nvgPnMuyoQs" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:50:51 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index 77e985bf28..0000000000 --- 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark, mysterious traces,\nNo more guessing, in empty spaces.\nOpenTelemetry's light now shines,\nIlluminating all our designs.\n\nFrom microservices, small and fleet,\nTo monolithic beasts, hard to beat,\nIt weaves a net, both fine and strong,\nWhere metrics flow, where logs belong.\n\nTraces dance, a vibrant hue,\nShowing journeys, old and new.\nSpans unfold, a story told,\nOf requests handled, brave and bold.\n\nMetrics hum, a steady beat,\nLatency, errors, can't be beat.\nDistribution charts, a clear display,\nGuiding us along the way.\n\nLogs provide a detailed view,\nOf what happened, me and you.\nContext rich, with helpful clues,\nDebugging woes, it quickly subdues.\n\nWith exporters wise, a thoughtful choice,\nTo Prometheus, Jaeger, or Zipkin's voice,\nOur data flows, a precious stream,\nReal-time insights, a waking dream.\n\nSo hail to OpenTelemetry's might,\nBringing clarity to our darkest night.\nObservability's champion, bold and true,\nA brighter future, for me and you.\n" - } - ] - }, - "finishReason": "STOP", - 
"avgLogprobs": -0.4071464086238575 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 253, - "totalTokenCount": 261, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 253 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:12.443989Z", - "responseId": "4HDLZ9WMG6SK698Pr5uZ2Qw" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 7d3d7a56b2..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark, mysterious traces,\nOf failing systems, hidden spaces.\nOpen Telemetry's light shines bright,\nGuiding us through the darkest night.\n\nFrom metrics gathered, finely spun,\nTo logs that tell of tasks undone,\nAnd traces linking every call,\nIt answers questions, standing tall.\n\nDistributed systems, complex and vast,\nTheir hidden flaws, no longer cast\nIn shadows deep, beyond our view,\nOpen Telemetry sees them through.\n\nWith spans and attributes, it weaves a tale,\nOf requests flowing, never frail.\nIt pinpoints bottlenecks, slow and grim,\nAnd helps us optimize, system trim.\n\nAcross languages, a common ground,\nWhere data's shared, and insights found.\nExporters whisper, collectors hum,\nA symphony of data, overcome.\n\nSo raise a glass, to this open source,\nA shining beacon, a powerful force.\nOpen Telemetry, a guiding star,\nRevealing secrets, near and far.\n" - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -0.3586180628193498 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 211, - "totalTokenCount": 219, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 211 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:09.936326Z", - "responseId": 
"3XDLZ4aTOZSpnvgPn-e0qQk" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..e717acd482 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In realms of code, where microservices gleam,\nA hidden flow, a silent, complex stream.\nThe journey lost, where data takes its flight,\nA developer's plea for dawning light.\n\nThen from the cloud, a standard takes its stand,\nWith open heart, across the digital land.\nOpenTelemetry, a beacon strong and true,\nTo pierce the fog, and bring the facts to view.\n\nA Trace begins, a story to unfold,\nThrough every service, brave and strong and bold.\nWith Spans entwined, a parent and its child,\nThe call graph painted, nevermore beguiled.\n\nThen Metrics rise, in numbers clear and plain,\nThe count of errors, or the joyful gain.\nA gauge of load, a histogram's wide spread,\nThe health of systems, clearly to be read.\n\nAnd Logs arrive, with context rich and deep,\nThe structured message, secrets they don't keep.\nEach line a moment, with data to attest,\nA vital clue, to pass the crucial test.\n\nNo vendor lock, no proprietary chain,\nBut open hands, a collaborative domain.\nA unified approach, for all to understand,\nThe data flowing, free across the land.\n\nWith SDKs bright, in languages diverse,\nYou instrument your code, a crucial verse.\nAttach the agents, where the queries run,\nThe observation journey, has begun.\n\nThe Collector waits, a gatherer so keen,\nTo process data, clean and sharp and lean.\nTransform and 
batch, then send it far away,\nTo chosen backend, come what may.\n\nSo dark no longer, where the errors hide,\nBut clarity shines, your systems open wide.\nThe 'why' revealed, the latency explained,\nWith OTel's power, mastery is gained.\n\nFor engineers who seek to truly see,\nThe inner workings, wild and fiercely free.\nEmbrace the standard, let its power ignite,\nOpenTelemetry, your guiding, digital light." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -1.5871007582720589 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 425, + "totalTokenCount": 2470, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 425 + } + ], + "thoughtsTokenCount": 2037 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:21.557646Z", + "responseId": "PeXjaM6EItur3NoPkYPI2A4" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:50:34 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..09a734361f --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,102 @@ +interactions: 
+- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In lands of code, where microservices roam,\nAnd hidden pathways lead our data home,\nWhere errors lurk and mysteries reside,\nA guiding light we desperately confide.\n\nThen from the open source, a beacon gleams,\nUnifying visions, waking dormant dreams.\nNo longer bound by vendor's watchful hand,\nOpenTelemetry, across the digital land.\n\nIt weaves the **Traces**, threads of golden light,\nThrough every service, in the darkest night.\nEach **Span** a step, a function's brief embrace,\nRevealing journeys, time, and hidden space.\n\nThen **Metrics** rise, in counters and in gauges,\nPerformance stories, told on countless pages.\nCPU hum, latency's slow tide,\nThe health of systems, nowhere left to hide.\n\nAnd **Logs**, the whispers from each busy node,\nA chronicle of actions, burdens freely showed.\nFrom info messages to errors stark and plain,\nContextual wisdom, banishing the pain.\n\nWith SDKs and agents, code aware it grows,\nThrough sturdy Collectors, the data swiftly flows.\nTo chosen backends, be they new or old,\nA unified stream, a story to unfold.\n\nNo longer blind, to whispers in the dark,\nWe find the culprits, leaving their dark mark.\nUnderstand the flow, the logic and the strain,\nAnd build 
with confidence, again and again.\n\nSo hail the standard, vibrant, strong, and free,\nFor every system, for you, for me.\nOpenTelemetry, a power understood,\nIlluminating darkness, for the common good." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -1.8066022087545957 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 340, + "totalTokenCount": 2275, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 340 + } + ], + "thoughtsTokenCount": 1927 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:08.533649Z", + "responseId": "MOXjaJHJIPP02PgP1dnn0Qo" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:50:20 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index a946911c36..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,97 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" longer dark, the tracing's light,\\nOpen Telemetry, shining\ - \ bright\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \".\\nA beacon in the coding night,\\nRevealing paths, both\ - \ dark\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ - 2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" and bright.\\n\\nFrom microservice to sprawling beast,\\\ - nIts watchful eye, a silent priest.\\nObserving calls, both small and vast,\\\ - nPerformance\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" flaws, revealed at last.\\n\\nWith metrics gleaned and logs\ - \ 
aligned,\\nA clearer picture, you will find.\\nOf latency, and errors dire,\\\ - n\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\ - ,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"And bottlenecks\ - \ that set afire.\\n\\nIt spans the clouds, a network wide,\\nWhere data streams,\ - \ a surging tide.\\nCollecting traces, rich and deep,\\nWhile slumbering apps\ - \ their secrets keep.\\n\\nJaeger, Zip\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"kin, the tools it holds,\\nA tapestry of stories told.\\nOf\ - \ requests flowing, swift and free,\\nOr tangled knots, for all to see.\\\ - n\\nSo embrace the power, understand,\\nThe vital role, across the\"}]}}],\"\ - modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\ - ,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" land.\\nOpen\ - \ Telemetry, a guiding star,\\nTo navigate the digital afar.\\n\"}]},\"finishReason\"\ - : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ - : 212,\"totalTokenCount\": 220,\"promptTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 212}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\ - createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: 
- - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 647a76b80a..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,102 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"The\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" black box whispers, secrets deep,\\nOf failing systems, promises\ - \ to keep.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: 
{\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\nBut tracing's light, a guiding hand,\\nReveals the path\"\ - }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", across the\ - \ land.\\n\\nOpen Telemetry, a beacon bright,\\nIlluminating pathways, day\ - \ and night.\\nFrom spans and traces, stories told,\"}]}}],\"modelVersion\"\ - : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nOf requests\ - \ flowing, brave and bold.\\n\\nThe metrics rise, a vibrant chart,\\nDisplaying\ - \ latency, a work of art.\\nEach request'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"s journey, clearly shown,\\nWhere bottlenecks slumber, seeds\ - \ are sown.\\n\\nWith logs appended, context clear,\\nThe root of problems,\ - \ drawing near.\\nObservability's embrace, so wide,\\nUnraveling mysteries,\"\ - }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" deep inside.\\\ - n\\nFrom simple apps to complex weaves,\\nOpen Telemetry's power achieves,\\\ - nA unified vision, strong and true,\\nMonitoring systems, old and new.\\n\\\ - nNo vendor lock-in, free to roam,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - 
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\nAcross the clouds, and find your home.\\nA standard rising,\ - \ strong and bold,\\nA future brighter, to behold.\\n\\nSo let the traces\ - \ flow and gleam,\\nOpen Telemetry, a vibrant dream.\\nOf healthy systems,\ - \ running free,\\nFor all to see, for all to be.\"}]}}],\"modelVersion\":\ - \ \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\n\"}]},\"\ - finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"\ - candidatesTokenCount\": 258,\"totalTokenCount\": 266,\"promptTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 258}]},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..b09dd31662 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,102 @@ 
+interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In realms of code, where microservices bloom,\\nA tangled\ + \ web, obscured by digital gloom.\\nWhen errors hide, and latency takes hold,\\\ + nA story lost, a mystery untold.\\n\\nThen from the depths, a guiding light\ + \ appears,\\nTo banish doubt and quell developers' fears.\\n**OpenTelemetry**,\"\ + }]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"\ + responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a standard, bold\ + \ and free,\\nUnifying sight for all the world to see.\\n\\nThrough vast distributed\ + \ lands, it weaves a thread,\\nFrom start to finish, where each packet's led.\\\ + nWith **spans** and parentage, a call stack clear,\\nThe journey's path, dismissing\ + \ every fear.\\n\\n\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\"\ + ,\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"Then 
**metrics**\ + \ rise, a pulse, a steady beat,\\nOf CPU, memory, and every server's heat.\\\ + nLatency's dance, throughput's flowing tide,\\nIn charts and graphs, where\ + \ vital truths reside.\\n\\nAnd **logs**, the tales of action, word by word,\\\ + n\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"\ + responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \"Each event recorded,\ + \ faithfully heard.\\n**Context attached**, correlation strong,\\nWhere truth\ + \ unfolds, and nothing can go wrong.\\n\\nNo vendor lock, no proprietary chain,\\\ + nJust **open standards**, easing every pain.\\nWith SDKs to **instrument**\ + \ your plea,\\nAnd **collectors** gathering, for all to see.\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\n\\nSo clarity descends, where once was night,\\nA system's\ + \ health, revealed in shining light.\\nFor dev and ops, a shared and common\ + \ tongue,\\n**OpenTelemetry**, its virtues widely sung.\"}]},\"finishReason\"\ + : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ + : 309,\"totalTokenCount\": 1779,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 309}],\"thoughtsTokenCount\": 1462},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\"\ + ,\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 
15:51:52 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..4f98f0102d --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,109 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In realms of code, where logic weaves its way,\\nAnd microservices\ + \ in silent dance hold sway,\\nA shadowed maze, where errors often hide,\\\ + nAnd mysteries within the darkness ride.\\n\\nThen from the cloud, a guiding\ + \ star takes flight,\\nOpenTelemetry, bringing forth the light.\\nA universal\ + \ tongue, for systems\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ + ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" to embrace,\\\ + nTo chart the journey, through time and space.\\n\\nIt paints the **Traces**,\ + \ a thread of golden gleam,\\nEach request's passage, a detailed, vital dream.\\\ + nFrom source to endpoint, through every hop it flows,\\nThe parent-child relationships,\ + \ every service knows.\\n\\nThen\"}]}}],\"usageMetadata\": {\"trafficType\"\ + : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ + 2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" **Metrics** rise, with numbers sharp and clear,\\nThe CPU's\ + \ heartbeat, the latency we 
fear.\\nCounts, gauges, histograms, precise and\ + \ true,\\nPerformance secrets, brought right into view.\\n\\nAnd **Logs**,\ + \ the tales each crucial moment tells,\\nContextual details, from the deepest\ + \ cells.\\nWith\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ + ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" trace IDs\ + \ tied, a story now complete,\\nNo isolated entry, bitter-sweet.\\n\\nThrough\ + \ agents calm, the Collector takes its stand,\\nGathering wisdom, from every\ + \ corner of the land.\\nVendor-neutral standard, a freedom it bestows,\\nOn\ + \ how to see, and where the knowledge goes.\\n\\nNo\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" longer guessing, in the dark we grope,\\nBut paths illuminated,\ + \ sparking future hope.\\nFor engineers and SREs, a tool so grand,\\nTo understand\ + \ the landscape, fully in command.\\n\\nSo hail to OTel, its vision strong\ + \ and wide,\\nOur system's heartbeat, can no longer hide\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \".\\nA canvas drawn, where every pixel gleams,\\nThe living\ + \ tapestry of all our digital dreams.\"}]},\"finishReason\": \"STOP\"}],\"\ + usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\": 353,\"\ + totalTokenCount\": 1932,\"trafficType\": 
\"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 353}],\"thoughtsTokenCount\": 1571},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ + ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 15:51:41 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index 9a068aae89..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,99 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" more dark logs, a cryptic, silent scream,\\nNo more the hunt\ - \ for\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ - 2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" errors, a lost, fading dream.\\nOpen Telemetry, a beacon\ - \ in\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ - 2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" the night,\\nShining forth its data, clear and burning bright.\\\ - n\\nFrom traces spanning systems, a flowing, silver thread,\\nMetrics pulse\ - \ and measure,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" insights finely spread.\\nLogs enriched with context, a 
story\ - \ they unfold,\\nOf requests and responses, both brave and bold.\\n\\nObservability's\ - \ promise\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \", a future now at hand,\\nWith vendors interoperable, a collaborative\ - \ band.\\nNo longer vendor lock-in, a restrictive, iron cage,\\nBut freedom\ - \ of selection, turning a new page.\\n\\nFrom microservices humming,\"}]}}],\"\ - modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ - ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a symphony\ - \ of calls,\\nTo monolithic giants, answering their thralls,\\nOpen Telemetry\ - \ watches, with keen and watchful eye,\\nDetecting the anomalies, before they\ - \ rise and fly.\\n\\nSo let the data flow freely, a\"}]}}],\"modelVersion\"\ - : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ - ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" river strong\ - \ and deep,\\nIts secrets it will whisper, while the systems sleep.\\nOpen\ - \ Telemetry's power, a force that we can wield,\\nTo build more stable systems,\ - \ in the digital field.\\n\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\ - : {\"promptTokenCount\": 8,\"candidatesTokenCount\": 238,\"totalTokenCount\"\ - : 246,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"\ - candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 238}]},\"\ - modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ - ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\n" - headers: - Content-Disposition: - - attachment - 
Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 669f1af93b..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,99 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" more dark, mysterious traces,\\nNo more guessing, in time\ - \ and spaces.\"}]}}],\"modelVersion\": 
\"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\nOpen Telemetry's light shines bright,\\nIlluminating the\ - \ code'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\":\ - \ \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"s dark night.\\n\\nFrom spans and metrics, a story told,\\\ - nOf requests flowing, both brave and bold.\\nTraces weaving, a tapestry grand,\"\ - }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\ - ,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nShowing\ - \ performance, across the land.\\n\\nLogs and metrics, a perfect blend,\\\ - nInformation's flow, without end.\\nObservability's promise\"}]}}],\"modelVersion\"\ - : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\ - ,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", clear and\ - \ true,\\nInsights revealed, for me and you.\\n\\nJaeger, Zipkin, a chorus\ - \ sings,\\nWith exporters ready, for all the things.\\nFrom simple apps to\ - \ systems vast,\\nOpen Telemetry'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"s power will last.\\n\\nNo vendor lock-in, a freedom sweet,\\\ - nOpen source glory, can't be beat.\\nSo let us embrace, this modern way,\\\ - nTo monitor systems, come what may.\\n\\nFrom\"}]}}],\"modelVersion\": \"\ - 
gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"\ - responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\": [{\"\ - content\": {\"role\": \"model\",\"parts\": [{\"text\": \" microservices, small\ - \ and slight,\\nTo monolithic giants, shining bright,\\nOpen Telemetry shows\ - \ the path,\\nTo understand, and fix the wrath,\\nOf latency demons, lurking\ - \ near,\\nBringing clarity, year after year.\\n\"}]},\"finishReason\": \"\ - STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ - : 242,\"totalTokenCount\": 250,\"promptTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 242}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\ - createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..e06f1689d3 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,123 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In realms of code, where microservices roam,\\nA digital maze,\ + \ far from a single home.\\nWhere calls interweave, and logic softly hums,\\\ + nHow know the path, when trouble swiftly comes?\\nA black box deep, with secrets\ + \ held inside,\\nWhere errors lurk, and issues try to hide\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \".\\n\\nThen a beacon shines, a guiding, steady hand,\\nA common\ + \ tongue, across the digital land.\\n**OpenTelemetry**, its promise clear,\ + \ for all to see,\\nA standard set, for observability.\\nNo vendor lock, but\ + \ freedom's open gate,\\nTo understand, before it is too late.\\n\\n\"}]}}],\"\ + usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"A **trace** begins, a thread of light unfurled,\\nThrough\ + \ services vast, across the cyber world.\\nEach interaction, a **span** along\ + \ the 
way,\\nFrom user's click, to final, rendered day.\\nA parent's call,\ + \ a child's reply so swift,\\nA\"}]}}],\"usageMetadata\": {\"trafficType\"\ + : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ + 2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" narrative woven, an insightful gift.\\n\\nThe **metrics**\ + \ hum, the system's steady beat,\\nA pulse of health, or looming, cold defeat.\\\ + nCPU's quick breath, memory's deep embrace,\\nRequest rates soar, or slow\ + \ to crawling pace.\\nCounters tick, and gauges softly sway,\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\nRevealing truths, at close of every day.\\n\\nAnd **logs**\ + \ record, the whispers from the core,\\nEach critical event, and nothing less,\ + \ or more.\\nA timestamped tale, of what transpired within,\\nWhere errors\ + \ bloomed, or victories did win.\\nThe full context shared, with every\"}]}}],\"\ + usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" single line,\\nA detailed story, perfectly aligned.\\n\\\ + nThrough **context propagation**, threads entwine,\\nAcross boundaries, making\ + \ the design\\nCoherent, whole, where once were gaping cracks,\\nThe full\ + \ transaction, on its varied tracks.\\nThe **Collector** stands, a guardian\ + \ at the gate,\\nTo gather\"}]}}],\"usageMetadata\": {\"trafficType\": \"\ + ON_DEMAND\"},\"modelVersion\": 
\"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:18.506243Z\"\ + ,\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" streams, before\ + \ it is too late.\\nTransform, enrich, then send where insights bloom,\\nDispelling\ + \ shadows, chasing back the gloom.\\n\\nSo gaze upon the truth, in light so\ + \ clear,\\nNo longer guessing, banishing all fear.\\nWith OpenTelemetry, the\ + \ path is known,\\nThe digital future, gracefully is sown\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \".\\nFor those who build, and those who bravely run,\\nA deeper\ + \ vision, 'til the work is done.\\nThe complexity tamed, the chaos brought\ + \ to heel,\\nThe heart of systems, finally revealed.\"}]},\"finishReason\"\ + : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ + : 514,\"totalTokenCount\": 2270,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 514}],\"thoughtsTokenCount\": 1748},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:18.506243Z\"\ + ,\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 15:51:29 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..8b63b05da6 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,109 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In labyrinthine code, where shadows creep,\\nA million microservices,\ + \ secrets keep.\\nPerformance woes, a phantom's silent hand,\\nAcross the\ + \ digital, a darkened land.\\n\\nThen from the mist, a beacon starts to gleam,\\\ + nOpenTelemetry, a waking dream.\\nA common tongue, for data,\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" vast and deep,\\nThe 
promises of clarity to keep.\\n\\nThe\ + \ trace unfurls, a golden thread so fine,\\nFrom user click, to database divine.\\\ + nEach span a step, a function's hurried breath,\\nUnveiling latency, and coding's\ + \ death.\\n\\nWith metrics gathered, pulses start to\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" beat,\\nCPU hums, and memory's hot seat.\\nRequests per second,\ + \ errors on the rise,\\nA living dashboard, open to our eyes.\\n\\nAnd humble\ + \ logs, with details stark and clear,\\nEvents recorded, banishing all fear.\\\ + nThe story told, of what transpired within,\\nA\"}]}}],\"usageMetadata\":\ + \ {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" whispered secret, where issues begin.\\n\\nThrough context\ + \ woven, links are forged anew,\\nA tapestry of truth, for me and you.\\nNo\ + \ isolated fragment, lost and lone,\\nBut part of history, clearly shown.\\\ + n\\nThe root cause found, where once was dark despair,\\nPerformance tuned,\ + \ beyond all earthly care\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:06.600201Z\"\ + ,\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \".\\nA system\ + \ known, its whispers and its roar,\\nObservability, forevermore.\\n\\nNo\ + \ vendor locked, no single path to take,\\nAn open standard, for all our systems'\ + \ sake.\\nA community's embrace, robust and wide,\\nWhere 
wisdom flows, with\ + \ nothing left to hide.\\n\\nSo\"}]}}],\"usageMetadata\": {\"trafficType\"\ + : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ + 2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" hail the tools, that bring the light to bear,\\nOn tangled\ + \ webs, and burdens we must share.\\nOpenTelemetry, a guide so true,\\nRevealing\ + \ insights, shining ever new.\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\ + : {\"promptTokenCount\": 8,\"candidatesTokenCount\": 370,\"totalTokenCount\"\ + : 1904,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\": [{\"modality\"\ + : \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\"\ + : \"TEXT\",\"tokenCount\": 370}],\"thoughtsTokenCount\": 1526},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"\ + responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 15:51:16 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 039d077b72..5c7e683456 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -17,6 +17,7 @@ from unittest.mock import patch from 
google.genai.types import GenerateContentConfig + from opentelemetry._events import Event from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, @@ -123,7 +124,10 @@ def test_generated_span_counts_tokens(self): self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123) self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456) - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_records_system_prompt_as_log(self): config = {"system_instruction": "foo"} self.configure_valid_response() @@ -135,7 +139,10 @@ def test_records_system_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "foo") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}, + ) def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): config = {"system_instruction": "foo"} self.configure_valid_response() @@ -147,7 +154,10 @@ def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present( self, ): @@ -155,7 +165,10 @@ def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present( self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_does_not_have_event_named("gen_ai.system.message") - @patch.dict("os.environ", 
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_records_user_prompt_as_log(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -164,7 +177,10 @@ def test_records_user_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "Some input") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}, + ) def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -173,7 +189,10 @@ def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_records_response_as_log(self): self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -184,7 +203,10 @@ def test_records_response_as_log(self): "Some response content", json.dumps(event_record.body["content"]) ) - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}, + ) def test_does_not_record_response_as_log_if_disabled_by_env(self): self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -211,30 +233,92 @@ def 
test_new_semconv_record_completion_as_log(self): content = "Some input" output = "Some response content" sys_instr = "System instruction" - with self.subTest(f"mode: {mode}", patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: self.configure_valid_response(text=output) - self.generate_content(model="gemini-2.0-flash", contents=content, config=GenerateContentConfig(system_instruction=sys_instr)) - self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") - event = self.otel.get_event_named("gen_ai.client.inference.operation.details") + self.generate_content( + model="gemini-2.0-flash", + contents=content, + config=GenerateContentConfig( + system_instruction=sys_instr + ), + ) + self.otel.assert_has_event_named( + "gen_ai.client.inference.operation.details" + ) + event = self.otel.get_event_named( + "gen_ai.client.inference.operation.details" + ) if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.SPAN_ONLY, ]: - self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, event.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, event.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, event.attributes) + self.assertNotIn( + gen_ai_attributes.GEN_AI_INPUT_MESSAGES, + event.attributes, + ) + self.assertNotIn( + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, + event.attributes, + ) + self.assertNotIn( + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, + event.attributes, + ) else: attrs = { - gen_ai_attributes.GEN_AI_INPUT_MESSAGES: ({"role": "user", "parts": ({"content": content, "type": "text"},)},), - gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: ({"role": "assistant", "parts": ({"content": output, "type": "text"},), "finish_reason": ""},), - gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: ({"content": sys_instr, "type": "text"},) + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: ( + { + "role": 
"user", + "parts": ( + {"content": content, "type": "text"}, + ), + }, + ), + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: ( + { + "role": "assistant", + "parts": ( + {"content": output, "type": "text"}, + ), + "finish_reason": "", + }, + ), + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: ( + {"content": sys_instr, "type": "text"}, + ), } - expected_event = Event("gen_ai.client.inference.operation.details", attributes=attrs) - self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES]) - self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES]) - self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], expected_event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS]) + expected_event = Event( + "gen_ai.client.inference.operation.details", + attributes=attrs, + ) + self.assertEqual( + event.attributes[ + gen_ai_attributes.GEN_AI_INPUT_MESSAGES + ], + expected_event.attributes[ + gen_ai_attributes.GEN_AI_INPUT_MESSAGES + ], + ) + self.assertEqual( + event.attributes[ + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES + ], + expected_event.attributes[ + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES + ], + ) + self.assertEqual( + event.attributes[ + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS + ], + expected_event.attributes[ + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS + ], + ) self.tearDown() def test_new_semconv_record_completion_in_span(self): @@ -252,23 +336,57 @@ def test_new_semconv_record_completion_in_span(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: self.configure_valid_response(text="Some response content") - 
self.generate_content(model="gemini-2.0-flash", contents="Some input", config=GenerateContentConfig(system_instruction="System instruction")) - span = self.otel.get_span_named("generate_content gemini-2.0-flash") + self.generate_content( + model="gemini-2.0-flash", + contents="Some input", + config=GenerateContentConfig( + system_instruction="System instruction" + ), + ) + span = self.otel.get_span_named( + "generate_content gemini-2.0-flash" + ) if mode in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') - self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') - self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], '[{"content": "System instruction", "type": "text"}]') + self.assertEqual( + span.attributes[ + gen_ai_attributes.GEN_AI_INPUT_MESSAGES + ], + '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]', + ) + self.assertEqual( + span.attributes[ + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES + ], + '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]', + ) + self.assertEqual( + span.attributes[ + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS + ], + '[{"content": "System instruction", "type": "text"}]', + ) else: - self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, span.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, span.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, span.attributes) + self.assertNotIn( + gen_ai_attributes.GEN_AI_INPUT_MESSAGES, + span.attributes, + ) + self.assertNotIn( + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, + span.attributes, + ) + self.assertNotIn( + 
gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, + span.attributes, + ) self.tearDown() diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index afe4dbfe6b..15fdd3dd3e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -35,6 +35,7 @@ import google.genai import pytest import yaml +from google.genai import types from vcr.record_mode import RecordMode from opentelemetry.instrumentation.google_genai import ( @@ -389,7 +390,13 @@ def fixture_gcloud_api_key(gemini_api_key): @pytest.fixture(name="nonvertex_client_factory") def fixture_nonvertex_client_factory(gemini_api_key): def _factory(): - return google.genai.Client(api_key=gemini_api_key, vertexai=False) + return google.genai.Client( + api_key=gemini_api_key, + vertexai=False, + http_options=types.HttpOptions( + headers={"accept-encoding": "identity"} + ), + ) return _factory @@ -404,6 +411,9 @@ def _factory(): project=gcloud_project, location=gcloud_location, credentials=gcloud_credentials, + http_options=types.HttpOptions( + headers={"accept-encoding": "identity"} + ), ) return _factory @@ -435,7 +445,7 @@ def fixture_is_async(request): return request.param == "async" -@pytest.fixture(name="model", params=["gemini-1.5-flash-002"]) +@pytest.fixture(name="model", params=["gemini-2.5-flash"]) def fixture_model(request): return request.param diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py index 7439944e37..501519d4ff 100644 --- 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py @@ -15,6 +15,7 @@ from unittest.mock import patch import google.genai.types as genai_types + from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, @@ -297,7 +298,9 @@ def test_new_semconv_tool_calls_record_parameter_values(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: calls = [] @@ -307,7 +310,11 @@ def handle(*args, **kwargs): return "some result" def somefunction(someparam, otherparam=2): - print("someparam=%s, otherparam=%s", someparam, otherparam) + print( + "someparam=%s, otherparam=%s", + someparam, + otherparam, + ) self.mock_generate_content.side_effect = handle self.client.models.generate_content( @@ -322,8 +329,12 @@ def somefunction(someparam, otherparam=2): tools = config.tools wrapped_somefunction = tools[0] wrapped_somefunction(123, otherparam="abc") - self.otel.assert_has_span_named("execute_tool somefunction") - generated_span = self.otel.get_span_named("execute_tool somefunction") + self.otel.assert_has_span_named( + "execute_tool somefunction" + ) + generated_span = self.otel.get_span_named( + "execute_tool somefunction" + ) self.assertEqual( generated_span.attributes[ "code.function.parameters.someparam.type" @@ -340,11 +351,27 @@ def somefunction(someparam, otherparam=2): ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - self.assertEqual(generated_span.attributes["code.function.parameters.someparam.value"], 123) - 
self.assertEqual(generated_span.attributes["code.function.parameters.otherparam.value"], "abc") + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.someparam.value" + ], + 123, + ) + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.otherparam.value" + ], + "abc", + ) else: - self.assertNotIn("code.function.parameters.someparam.value", generated_span.attributes) - self.assertNotIn("code.function.parameters.otherparam.value", generated_span.attributes) + self.assertNotIn( + "code.function.parameters.someparam.value", + generated_span.attributes, + ) + self.assertNotIn( + "code.function.parameters.otherparam.value", + generated_span.attributes, + ) self.tearDown() def test_new_semconv_tool_calls_record_return_values(self): @@ -362,7 +389,9 @@ def test_new_semconv_tool_calls_record_return_values(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: calls = [] @@ -387,20 +416,27 @@ def somefunction(x, y=2): tools = config.tools wrapped_somefunction = tools[0] wrapped_somefunction(123) - self.otel.assert_has_span_named("execute_tool somefunction") - generated_span = self.otel.get_span_named("execute_tool somefunction") + self.otel.assert_has_span_named( + "execute_tool somefunction" + ) + generated_span = self.otel.get_span_named( + "execute_tool somefunction" + ) self.assertEqual( - generated_span.attributes["code.function.return.type"], "int" + generated_span.attributes["code.function.return.type"], + "int", ) if mode in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: self.assertIn( - "code.function.return.value", generated_span.attributes + "code.function.return.value", + generated_span.attributes, ) else: self.assertNotIn( - 
"code.function.return.value", generated_span.attributes + "code.function.return.value", + generated_span.attributes, ) self.tearDown() diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py index 6bfa53cf5e..7f92aade7e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py @@ -17,13 +17,17 @@ from unittest.mock import patch from google.genai import types as genai_types + from opentelemetry._events import get_event_logger_provider from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, _StabilityMode, ) -from opentelemetry.instrumentation.google_genai import otel_wrapper, tool_call_wrapper +from opentelemetry.instrumentation.google_genai import ( + otel_wrapper, + tool_call_wrapper, +) from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider from opentelemetry.util.genai.types import ContentCapturingMode @@ -299,19 +303,29 @@ def somefunction(arg=None): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: wrapped_somefunction = self.wrap(somefunction) wrapped_somefunction(12345) - span = self.otel.get_span_named("execute_tool somefunction") + span = self.otel.get_span_named( + "execute_tool somefunction" + ) if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.EVENT_ONLY, ]: - self.assertNotIn("code.function.parameters.arg.value", span.attributes) + 
self.assertNotIn( + "code.function.parameters.arg.value", + span.attributes, + ) else: - self.assertIn("code.function.parameters.arg.value", span.attributes) + self.assertIn( + "code.function.parameters.arg.value", + span.attributes, + ) self.tearDown() From 05bed52c7775463226c026efcc5d4734850f94e0 Mon Sep 17 00:00:00 2001 From: Dylan Russell Date: Mon, 6 Oct 2025 16:06:30 +0000 Subject: [PATCH 19/34] fix lint issues --- .../instrumentation/google_genai/generate_content.py | 1 + .../tests/generate_content/test_tool_call_instrumentation.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 60d6136e4e..796c267cff 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=too-many-lines import copy import dataclasses diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py index 501519d4ff..78eb642c60 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py @@ -285,6 +285,7 @@ def somefunction(x, y=2): def test_new_semconv_tool_calls_record_parameter_values(self): for mode in ContentCapturingMode: + calls = [] patched_environ = patch.dict( "os.environ", { @@ -303,7 +304,6 @@ def test_new_semconv_tool_calls_record_parameter_values(self): ): self.setUp() with patched_environ, patched_otel_mapping: - calls = [] def handle(*args, **kwargs): calls.append((args, kwargs)) @@ -376,6 +376,7 @@ def somefunction(someparam, otherparam=2): def test_new_semconv_tool_calls_record_return_values(self): for mode in ContentCapturingMode: + calls = [] patched_environ = patch.dict( "os.environ", { @@ -394,7 +395,6 @@ def test_new_semconv_tool_calls_record_return_values(self): ): self.setUp() with patched_environ, patched_otel_mapping: - calls = [] def handle(*args, **kwargs): calls.append((args, kwargs)) From ab39602cdbebbf41cae011778fcfb325a57d0686 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 8 Oct 2025 13:37:26 +0000 Subject: [PATCH 20/34] fix: aggregate streaming messages into one event. 
--- .../google_genai/generate_content.py | 99 ++++++++++--------- 1 file changed, 52 insertions(+), 47 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 796c267cff..2498584ffe 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -382,8 +382,7 @@ def process_completion( config: Optional[GenerateContentConfigOrDict] = None, ): self._update_response(response) - self._maybe_log_completion_details(request, response, config) - self._response_index += 1 + self._maybe_log_completion_details(request, response.candidates or [], config) def process_error(self, e: Exception): self._error_type = str(e.__class__.__name__) @@ -466,7 +465,7 @@ def _maybe_update_error_type(self, response: GenerateContentResponse): def _maybe_log_completion_details( self, request: Union[ContentListUnion, ContentListUnionDict], - response: GenerateContentResponse, + candidates: list[Candidate], config: Optional[GenerateContentConfigOrDict] = None, ): attributes = { @@ -481,7 +480,7 @@ def _maybe_log_completion_details( contents=transformers.t_contents(request) ) output_messages = to_output_messages( - candidates=response.candidates or [] + candidates=candidates ) span = trace.get_current_span() @@ -791,6 +790,7 @@ def instrumented_generate_content_stream( config: Optional[GenerateContentConfigOrDict] = None, **kwargs: Any, ) -> Iterator[GenerateContentResponse]: + candidates: list[Candidate] = [] helper = _GenerateContentInstrumentationHelper( self, otel_wrapper, @@ -818,7 +818,9 @@ def instrumented_generate_content_stream( 
helper.sem_conv_opt_in_mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL ): - helper.process_completion(contents, response, config) + helper._update_response(response) + if response.candidates: + candidates += response.candidates else: raise ValueError( f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." @@ -828,6 +830,7 @@ def instrumented_generate_content_stream( helper.process_error(error) raise finally: + helper._maybe_log_completion_details(contents, candidates, config) helper.finalize_processing() return instrumented_generate_content_stream @@ -923,50 +926,52 @@ async def instrumented_generate_content_stream( end_on_exit=False, ) as span: helper.add_request_options_to_span(config) - if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: - helper.process_request(contents, config) - try: - response_async_generator = await wrapped_func( - self, - model=model, - contents=contents, - config=helper.wrapped_config(config), - **kwargs, - ) - except Exception as error: # pylint: disable=broad-exception-caught - helper.process_error(error) - helper.finalize_processing() - with trace.use_span(span, end_on_exit=True): - raise - - async def _response_async_generator_wrapper(): - with trace.use_span(span, end_on_exit=True): - try: - async for response in response_async_generator: - if ( - helper.sem_conv_opt_in_mode - == _StabilityMode.DEFAULT - ): - helper.process_response(response) - elif ( - helper.sem_conv_opt_in_mode - == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL - ): - helper.process_completion( - contents, response, config - ) - else: - raise ValueError( - f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." 
- ) - yield response - except Exception as error: - helper.process_error(error) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_request(contents, config) + try: + response_async_generator = await wrapped_func( + self, + model=model, + contents=contents, + config=helper.wrapped_config(config), + **kwargs, + ) + except Exception as error: # pylint: disable=broad-exception-caught + helper.process_error(error) + helper.finalize_processing() + with trace.use_span(span, end_on_exit=True): raise - finally: - helper.finalize_processing() - return _response_async_generator_wrapper() + async def _response_async_generator_wrapper(): + candidates: list[Candidate] = [] + with trace.use_span(span, end_on_exit=True): + try: + async for response in response_async_generator: + if ( + helper.sem_conv_opt_in_mode + == _StabilityMode.DEFAULT + ): + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper._update_response(response) + if response.candidates: + candidates += response.candidates + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." 
+ ) + yield response + except Exception as error: + helper.process_error(error) + raise + finally: + helper._maybe_log_completion_details(contents, candidates, config) + helper.finalize_processing() + + return _response_async_generator_wrapper() return instrumented_generate_content_stream From 0ff51f3c2f1783ec26a75f4f3eeeaa013cdff453 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 8 Oct 2025 14:59:28 +0000 Subject: [PATCH 21/34] fix: remove gen_ai.system from latest sem_conv --- .../google_genai/generate_content.py | 35 ++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 2498584ffe..e9bfd04a2e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -221,6 +221,31 @@ def _add_request_options_to_span( span.set_attribute(key, value) +def _get_gen_ai_request_attributes(config: Union[GenerateContentConfigOrDict, None]) -> dict[str, Any]: + if not config: + return {} + attributes: dict[str, Any] = {} + config = _coerce_config_to_object(config) + if config.seed: + attributes[gen_ai_attributes.GEN_AI_REQUEST_SEED] = ( + config.seed + ) + if config.candidate_count: + attributes[gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT] = ( + config.candidate_count + ) + if config.response_mime_type: + if config.response_mime_type == "text/plain": + attributes[gen_ai_attributes.GEN_AI_OUTPUT_TYPE] = "text" + elif config.response_mime_type == "application/json": + attributes[gen_ai_attributes.GEN_AI_OUTPUT_TYPE] = "json" + else: + 
attributes[gen_ai_attributes.GEN_AI_OUTPUT_TYPE] = ( + config.response_mime_type + ) + return attributes + + def _get_response_property(response: GenerateContentResponse, path: str): path_segments = path.split(".") current_context = response @@ -347,7 +372,6 @@ def start_span_as_current_span( start_time=self._start_time, attributes={ code_attributes.CODE_FUNCTION_NAME: function_name, - gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system, gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model, gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME, }, @@ -399,6 +423,10 @@ def finalize_processing(self): gen_ai_attributes.GEN_AI_RESPONSE_FINISH_REASONS, sorted(self._finish_reasons_set), ) + if self.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + span.set_attribute( + gen_ai_attributes.GEN_AI_SYSTEM, self._genai_system + ) self._record_token_usage_metric() self._record_duration_metric() @@ -468,9 +496,7 @@ def _maybe_log_completion_details( candidates: list[Candidate], config: Optional[GenerateContentConfigOrDict] = None, ): - attributes = { - gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system, - } + attributes = _get_gen_ai_request_attributes(config) system_instructions = [] if system_content := _config_to_system_instruction(config): system_instructions = to_system_instructions( @@ -508,6 +534,7 @@ def _maybe_log_completion_details( ) ) span.set_attributes(completion_details_attributes) + span.set_attributes(attributes) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, ContentCapturingMode.SPAN_AND_EVENT, From e51ef3784976e727405df5d2938613115edf062a Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 8 Oct 2025 15:29:59 +0000 Subject: [PATCH 22/34] refactor: import encoder from utils-genai --- .../instrumentation/google_genai/generate_content.py | 4 ++-- .../instrumentation/google_genai/message.py | 10 ---------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index e9bfd04a2e..162c46a1bd 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -57,13 +57,13 @@ MessagePart, OutputMessage, ) +from opentelemetry.util.genai.utils import _GenAiJsonEncoder from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG from .dict_util import flatten_dict from .flags import is_content_recording_enabled from .message import ( - Base64JsonEncoder, to_input_messages, to_output_messages, to_system_instructions, @@ -316,7 +316,7 @@ def _create_completion_details_attributes( if as_str: return { - k: json.dumps(v, cls=Base64JsonEncoder) + k: json.dumps(v, cls=_GenAiJsonEncoder) for k, v in attributes.items() } diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index 6a515163b6..9d1027caa6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -14,11 +14,8 @@ from __future__ import annotations -import json import logging -from base64 import b64encode from enum import Enum -from typing import Any from google.genai import types as genai_types @@ -42,13 +39,6 @@ class Role(str, Enum): TOOL = "tool" -class 
Base64JsonEncoder(json.JSONEncoder): - def default(self, o: Any) -> Any: - if isinstance(o, bytes): - return b64encode(o).decode() - return super().default(o) - - _logger = logging.getLogger(__name__) From 5c3585305cb1d174dd1d47eba611b5d24380d7c5 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 8 Oct 2025 15:35:29 +0000 Subject: [PATCH 23/34] build: update deps versions. --- .../pyproject.toml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml index f4f91d1c90..18c02a0adc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml @@ -38,13 +38,14 @@ classifiers = [ ] dependencies = [ "opentelemetry-api >=1.37, <2", - "opentelemetry-instrumentation >=0.52b1, <2", - "opentelemetry-semantic-conventions >=0.52b1, <2" + "opentelemetry-instrumentation >=0.58b0, <2", + "opentelemetry-semantic-conventions >=0.58b0, <2", + "opentelemetry-util-genai >= 0.1b0" ] [project.optional-dependencies] instruments = [ - "google-genai >= 1.0.0" + "google-genai >= 1.32.0" ] [project.entry-points.opentelemetry_instrumentor] From 46b10d1ec931651df52ec1863b8d97cc847d384c Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 11:19:48 +0000 Subject: [PATCH 24/34] refactor: deduplicate creating attributes, use gen_ai_json_dumps --- .../google_genai/generate_content.py | 41 +++++++------------ 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 162c46a1bd..6a12806003 100644 --- 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -57,7 +57,7 @@ MessagePart, OutputMessage, ) -from opentelemetry.util.genai.utils import _GenAiJsonEncoder +from opentelemetry.util.genai.utils import gen_ai_json_dumps from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG @@ -314,12 +314,6 @@ def _create_completion_details_attributes( dataclasses.asdict(sys_instr) for sys_instr in system_instructions ] - if as_str: - return { - k: json.dumps(v, cls=_GenAiJsonEncoder) - for k, v in attributes.items() - } - return attributes @@ -521,37 +515,30 @@ def _maybe_log_completion_details( span=span, log_record=event, ) - if self._content_recording_enabled in [ - ContentCapturingMode.SPAN_ONLY, - ContentCapturingMode.SPAN_AND_EVENT, - ]: - completion_details_attributes = ( - _create_completion_details_attributes( - input_messages, - output_messages, - system_instructions, - as_str=True, - ) + completion_details_attributes = ( + _create_completion_details_attributes( + input_messages, + output_messages, + system_instructions, ) - span.set_attributes(completion_details_attributes) - span.set_attributes(attributes) + ) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - completion_details_attributes = ( - _create_completion_details_attributes( - input_messages, - output_messages, - system_instructions, - ) - ) event.attributes = { **(event.attributes or {}), **completion_details_attributes, } self._otel_wrapper.log_completion_details(event=event) + if self._content_recording_enabled in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + span.set_attributes({k: gen_ai_json_dumps(v) for k, v in 
completion_details_attributes.items()}) + span.set_attributes(attributes) + def _maybe_log_system_instruction( self, config: Optional[GenerateContentConfigOrDict] = None ): From 5703096a728cde997dc3f3551ef7b9a3ef3ad634 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 11:52:43 +0000 Subject: [PATCH 25/34] refactor: move blobpart and filedatapart to instrumentation-google-genai package. --- .../instrumentation/google_genai/message.py | 21 +++++++++++++++++-- .../src/opentelemetry/util/genai/types.py | 17 --------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index 9d1027caa6..70494b19a9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -16,12 +16,12 @@ import logging from enum import Enum +from dataclasses import dataclass +from typing import Literal from google.genai import types as genai_types from opentelemetry.util.genai.types import ( - BlobPart, - FileDataPart, FinishReason, InputMessage, MessagePart, @@ -39,6 +39,23 @@ class Role(str, Enum): TOOL = "tool" +@dataclass +class BlobPart: + data: bytes + mime_type: str + type: Literal["blob"] = "blob" + + +@dataclass +class FileDataPart: + mime_type: str + uri: str + type: Literal["file_data"] = "file_data" + + class Config: + extra = "allow" + + _logger = logging.getLogger(__name__) diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py index 4a3952d16e..569e7e7e00 100644 --- 
a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py @@ -55,23 +55,6 @@ class Text: type: Literal["text"] = "text" -@dataclass -class BlobPart: - data: bytes - mime_type: str - type: Literal["blob"] = "blob" - - -@dataclass -class FileDataPart: - mime_type: str - uri: str - type: Literal["file_data"] = "file_data" - - class Config: - extra = "allow" - - MessagePart = Union[Text, ToolCall, ToolCallResponse, Any] From b564008182bc33de46849615dd1a979d5da2fda6 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 11:57:01 +0000 Subject: [PATCH 26/34] test: add comment for headers. --- .../tests/generate_content/test_e2e.py | 1 + 1 file changed, 1 insertion(+) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 15fdd3dd3e..b31eb7ab08 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -394,6 +394,7 @@ def _factory(): api_key=gemini_api_key, vertexai=False, http_options=types.HttpOptions( + # to prevent compression headers={"accept-encoding": "identity"} ), ) From 8b8995c126de0fdfdb9c53b1969327444d916dfd Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 12:13:01 +0000 Subject: [PATCH 27/34] test: fix span attributes formatting. 
--- .../tests/generate_content/nonstreaming_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 5c7e683456..4ae6f00063 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -360,19 +360,19 @@ def test_new_semconv_record_completion_in_span(self): span.attributes[ gen_ai_attributes.GEN_AI_INPUT_MESSAGES ], - '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]', + '[{"role":"user","parts":[{"content":"Some input","type":"text"}]}]', ) self.assertEqual( span.attributes[ gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES ], - '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]', + '[{"role":"assistant","parts":[{"content":"Some response content","type":"text"}],"finish_reason":""}]', ) self.assertEqual( span.attributes[ gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS ], - '[{"content": "System instruction", "type": "text"}]', + '[{"content":"System instruction","type":"text"}]', ) else: self.assertNotIn( From 9ef75f71508861ff5f88d6ec1454317ee5388e69 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 12:24:51 +0000 Subject: [PATCH 28/34] fix: lint --- .../google_genai/generate_content.py | 41 +++++++++++-------- .../instrumentation/google_genai/message.py | 2 +- .../google_genai/otel_wrapper.py | 6 +-- 3 files changed, 27 insertions(+), 22 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 6a12806003..aa195204e2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -221,15 +221,15 @@ def _add_request_options_to_span( span.set_attribute(key, value) -def _get_gen_ai_request_attributes(config: Union[GenerateContentConfigOrDict, None]) -> dict[str, Any]: +def _get_gen_ai_request_attributes( + config: Union[GenerateContentConfigOrDict, None], +) -> dict[str, Any]: if not config: return {} attributes: dict[str, Any] = {} config = _coerce_config_to_object(config) if config.seed: - attributes[gen_ai_attributes.GEN_AI_REQUEST_SEED] = ( - config.seed - ) + attributes[gen_ai_attributes.GEN_AI_REQUEST_SEED] = config.seed if config.candidate_count: attributes[gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT] = ( config.candidate_count @@ -400,7 +400,9 @@ def process_completion( config: Optional[GenerateContentConfigOrDict] = None, ): self._update_response(response) - self._maybe_log_completion_details(request, response.candidates or [], config) + self._maybe_log_completion_details( + request, response.candidates or [], config + ) def process_error(self, e: Exception): self._error_type = str(e.__class__.__name__) @@ -499,9 +501,7 @@ def _maybe_log_completion_details( input_messages = to_input_messages( contents=transformers.t_contents(request) ) - output_messages = to_output_messages( - candidates=candidates - ) + output_messages = to_output_messages(candidates=candidates) span = trace.get_current_span() event = Event( @@ -515,12 +515,10 @@ def _maybe_log_completion_details( span=span, log_record=event, ) - completion_details_attributes = ( - _create_completion_details_attributes( - input_messages, - 
output_messages, - system_instructions, - ) + completion_details_attributes = _create_completion_details_attributes( + input_messages, + output_messages, + system_instructions, ) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, @@ -536,7 +534,12 @@ def _maybe_log_completion_details( ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - span.set_attributes({k: gen_ai_json_dumps(v) for k, v in completion_details_attributes.items()}) + span.set_attributes( + { + k: gen_ai_json_dumps(v) + for k, v in completion_details_attributes.items() + } + ) span.set_attributes(attributes) def _maybe_log_system_instruction( @@ -844,7 +847,9 @@ def instrumented_generate_content_stream( helper.process_error(error) raise finally: - helper._maybe_log_completion_details(contents, candidates, config) + helper._maybe_log_completion_details( + contents, candidates, config + ) helper.finalize_processing() return instrumented_generate_content_stream @@ -982,7 +987,9 @@ async def _response_async_generator_wrapper(): helper.process_error(error) raise finally: - helper._maybe_log_completion_details(contents, candidates, config) + helper._maybe_log_completion_details( + contents, candidates, config + ) helper.finalize_processing() return _response_async_generator_wrapper() diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index 70494b19a9..29ef112a6f 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -15,8 +15,8 @@ from __future__ import annotations import logging -from enum import Enum from dataclasses import dataclass +from enum 
import Enum from typing import Literal from google.genai import types as genai_types diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py index 136bf8b14f..b82288dec9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py @@ -18,7 +18,7 @@ import google.genai -from opentelemetry._logs import LogRecord, Logger, LoggerProvider +from opentelemetry._logs import Logger, LoggerProvider, LogRecord from opentelemetry.metrics import Meter, MeterProvider from opentelemetry.semconv._incubating.metrics import gen_ai_metrics from opentelemetry.semconv.schemas import Schemas @@ -40,9 +40,7 @@ class OTelWrapper: - def __init__( - self, tracer: Tracer, logger: Logger, meter: Meter - ): + def __init__(self, tracer: Tracer, logger: Logger, meter: Meter): self._tracer = tracer self._logger = logger self._meter = meter From 20c082c19434969339b716ebb691afda2b4eb215 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 13:03:57 +0000 Subject: [PATCH 29/34] fix: use LogRecord instead of Event. 
--- .../instrumentation/google_genai/generate_content.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index aa195204e2..4598915dc3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -38,7 +38,7 @@ ) from opentelemetry import trace -from opentelemetry._events import Event +from opentelemetry._logs import LogRecord from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, @@ -504,8 +504,8 @@ def _maybe_log_completion_details( output_messages = to_output_messages(candidates=candidates) span = trace.get_current_span() - event = Event( - name="gen_ai.client.inference.operation.details", + event = LogRecord( + event_name="gen_ai.client.inference.operation.details", attributes=attributes, ) self.completion_hook.on_completion( From f4df9fbe17eb459a058be44a20db4dcd24336698 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 13:48:34 +0000 Subject: [PATCH 30/34] docs: update changelog --- .../opentelemetry-instrumentation-google-genai/CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md index 39a9aa5eee..cebe7b92e5 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md @@ -7,6 
+7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +- Implement the new semantic convention changes made in https://github.com/open-telemetry/semantic-conventions/pull/2179. +A single event (`gen_ai.client.inference.operation.details`) is used to capture Chat History. This is opt-in, +an environment variable OTEL_SEMCONV_STABILITY_OPT_IN needs to be set to `gen_ai_latest_experimental` to see them ([#3386](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3386)) +- Support CompletionHook for upload to cloud storage. + ## Version 0.3b0 (2025-07-08) - Add automatic instrumentation to tool call functions ([#3446](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3446)) From 39deea933b7d32b34e1c4b6dea293a2c8d676c23 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 13:48:49 +0000 Subject: [PATCH 31/34] build: update uv.lock --- uv.lock | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/uv.lock b/uv.lock index 6b290de793..b7b2ce1e71 100644 --- a/uv.lock +++ b/uv.lock @@ -1466,7 +1466,7 @@ wheels = [ [[package]] name = "google-genai" -version = "1.16.1" +version = "1.42.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1474,12 +1474,13 @@ dependencies = [ { name = "httpx" }, { name = "pydantic" }, { name = "requests" }, + { name = "tenacity" }, { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ca/1f/1a52736e87b4a22afef615de45e2f509fbfb55c09798620b0c3f394076ef/google_genai-1.16.1.tar.gz", hash = "sha256:4b4ed4ed781a9d61e5ce0fef1486dd3a5d7ff0a73bd76b9633d21e687ab998df", size = 194270 } +sdist = { url = "https://files.pythonhosted.org/packages/18/03/84d04ce446d885eb978abb4b7c785f54a39435f02b182f457a996f5c9eb4/google_genai-1.42.0.tar.gz", hash = "sha256:0cef624c725a358f182e6988632371205bed9be1b1dbcf4296dbbd4eb4a9fb5d", size = 235620 
} wheels = [ - { url = "https://files.pythonhosted.org/packages/bc/31/30caa8d4ae987e47c5250fb6680588733863fd5b39cacb03ba1977c29bde/google_genai-1.16.1-py3-none-any.whl", hash = "sha256:6ae5d24282244f577ca4f0d95c09f75ab29e556602c9d3531b70161e34cd2a39", size = 196327 }, + { url = "https://files.pythonhosted.org/packages/f2/0a/8519cb752c10254899608de5c8cf5ff5ae05260a4ad5db0087fa466ddf46/google_genai-1.42.0-py3-none-any.whl", hash = "sha256:1e45c3ecc630a358c153a08b10d5b03d7c70cf3342fd116ac8a6cc4262cd81e8", size = 236204 }, ] [[package]] @@ -3144,6 +3145,7 @@ dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-instrumentation" }, { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-genai" }, ] [package.optional-dependencies] @@ -3153,10 +3155,11 @@ instruments = [ [package.metadata] requires-dist = [ - { name = "google-genai", marker = "extra == 'instruments'", specifier = ">=1.0.0" }, + { name = "google-genai", marker = "extra == 'instruments'", specifier = ">=1.32.0" }, { name = "opentelemetry-api", git = "https://github.com/open-telemetry/opentelemetry-python?subdirectory=opentelemetry-api&branch=main" }, { name = "opentelemetry-instrumentation", editable = "opentelemetry-instrumentation" }, { name = "opentelemetry-semantic-conventions", git = "https://github.com/open-telemetry/opentelemetry-python?subdirectory=opentelemetry-semantic-conventions&branch=main" }, + { name = "opentelemetry-util-genai", editable = "util/opentelemetry-util-genai" }, ] provides-extras = ["instruments"] From 661998ba26b5a4f6cc09b96a69c7cdde3ae5c05d Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 13:59:38 +0000 Subject: [PATCH 32/34] test: disable lint --- .../tests/generate_content/test_tool_call_instrumentation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py index 78eb642c60..2dc0a3d633 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py @@ -306,7 +306,7 @@ def test_new_semconv_tool_calls_record_parameter_values(self): with patched_environ, patched_otel_mapping: def handle(*args, **kwargs): - calls.append((args, kwargs)) + calls.append((args, kwargs)) # pylint: disable=cell-var-from-loop return "some result" def somefunction(someparam, otherparam=2): @@ -397,7 +397,7 @@ def test_new_semconv_tool_calls_record_return_values(self): with patched_environ, patched_otel_mapping: def handle(*args, **kwargs): - calls.append((args, kwargs)) + calls.append((args, kwargs)) # pylint: disable=cell-var-from-loop return "some result" def somefunction(x, y=2): From 49264eea7a90fe07df35f332d60033934312b660 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 9 Oct 2025 15:48:35 +0000 Subject: [PATCH 33/34] build: unpin opentelemetry-util-genai version --- .../pyproject.toml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml index 18c02a0adc..6d20ef4a17 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml @@ -40,8 +40,9 @@ dependencies = [ "opentelemetry-api >=1.37, <2", "opentelemetry-instrumentation >=0.58b0, <2", "opentelemetry-semantic-conventions >=0.58b0, <2", - "opentelemetry-util-genai >= 0.1b0" -] + # TODO https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3786: restrict + 
# version after the first release + "opentelemetry-util-genai",] [project.optional-dependencies] instruments = [ From 9bd4d257517e078ae752b1295d2bef2dfdf4d6ce Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Sat, 11 Oct 2025 10:21:24 +0000 Subject: [PATCH 34/34] test: add e2e completion_hook test --- ...-2.5-flash-vertexaiapi-async-default].yaml | 102 +++++++++++++++ ...-2.5-flash-vertexaiapi-sync-default].yaml} | 20 +-- ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 102 --------------- ...2.5-flash-vertexaiapi-async-default].yaml} | 16 +-- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 --------------- ...i-2.5-flash-vertexaiapi-sync-default].yaml | 102 +++++++++++++++ ...-2.5-flash-vertexaiapi-async-default].yaml | 116 +++++++++++++++++ ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 --------------- ...i-2.5-flash-vertexaiapi-sync-default].yaml | 109 ++++++++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 109 ---------------- ...-2.5-flash-vertexaiapi-async-default].yaml | 96 ++++++++++++++ ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 123 ------------------ ...i-2.5-flash-vertexaiapi-sync-default].yaml | 110 ++++++++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 109 ---------------- ...-enable_completion_hook-experimental].yaml | 102 +++++++++++++++ ...-enable_completion_hook-experimental].yaml | 102 +++++++++++++++ ...-enable_completion_hook-experimental].yaml | 102 +++++++++++++++ ...-enable_completion_hook-experimental].yaml | 102 +++++++++++++++ .../tests/generate_content/test_e2e.py | 116 ++++++++++++++++- .../tests/requirements.latest.txt | 2 +- .../tests/requirements.oldest.txt | 2 +- 21 files changed, 1174 insertions(+), 672 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml rename 
instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/{test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml => test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml} (50%) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml rename instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/{test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml => test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml} (54%) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml create mode 100644 
instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml new file mode 100644 index 0000000000..9e3f748b60 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In tangled webs where microservices gleam,\nAnd cloud-born systems whisper through a dream,\nA silent plea arises, soft and low:\n\"What truly happens? 
Where does the data flow?\"\n\nFrom this complex symphony, a standard came,\nTo cast a light upon the hidden game.\nNot tied to one, but open, brave, and free,\n**OpenTelemetry**, for all to see.\n\nIt offers sight, where once was hazy guess,\nThree vital pillars, banishing distress:\n**First, Traces** weave a path, a golden thread,\nFrom first request, through every server led.\nEach tiny hop, a span, a measured beat,\nRevealing journeys, bittersweet and fleet.\n\nThen **Metrics** rise, like numbers in the air,\nA pulse, a count, a burden systems bear.\nCPU hum, or memory's slow creep,\nThe silent sentinels, while data sleeps.\nGauges, counters, histograms defined,\nThe system's health, precisely, you will find.\n\nAnd **Logs**, the whispers from the code's deep heart,\nEach message thrown, a critical new start.\nErrors caught, or triumphs understood,\nThe story told, for bad, or for the good.\nContext rich, linked to the trace it keeps,\nWhere hidden truths, the developer sweeps.\n\nThese three unite, no longer kept apart,\nA common tongue, a truly open art.\nWith SDKs, your code can now converse,\nAnd send its data, 'cross the universe.\nTo any backend, or analytics tool,\nBreaking the vendor's tightly guarded rule.\n\nSo when the bug lurks deep, or users fret,\nOr performance lags, a problem to be met,\n**OpenTelemetry** sheds its guiding light,\nTransforming darkness into knowing sight.\nA tapestry of data, richly spun,\nThe battle for observability won." 
+ } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -1.836124080242497 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 404, + "totalTokenCount": 1958, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 404 + } + ], + "thoughtsTokenCount": 1546 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-10T15:56:32.819932Z", + "responseId": "sCzpaNyFMuLlnvgP3MqN0AQ" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Fri, 10 Oct 2025 15:56:44 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml similarity index 50% rename from instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml rename to instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml index 68fe1978e2..09a94edd30 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml +++ 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml @@ -27,7 +27,7 @@ interactions: host: - us-central1-aiplatform.googleapis.com user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 + - google-genai-sdk/1.32.0 gl-python/3.10.18 x-goog-api-client: - x-goog-user-project: @@ -44,18 +44,18 @@ interactions: "role": "model", "parts": [ { - "text": "In tangled clouds, where microservices roam,\nAnd data streams find a circuitous home,\nA dark abyss, where errors often hide,\nWith mysteries deep, there's nowhere to confide.\n\nBut from the void, a standard takes its flight,\nTo bring the hidden processes to light.\nOpenTelemetry, a unified design,\nA beacon in the digital divine.\n\nWith traces keen, a journey we embark,\nEach operation leaves its vital mark.\nFrom service entry to a database's keep,\nContext propagated, secrets now don't sleep.\n\nThen metrics gleam, in charts and graphs displayed,\nResponse times measured, no detail delayed.\nThrough counters, gauges, histograms so precise,\nPerformance whispered, at a measurable price.\n\nAnd logs, the tales of moments as they pass,\nDiscrete events within the digital glass.\nWith structured wisdom, messages unfold,\nA narrative of what the systems hold.\n\nThrough instrumentation, code begins to speak,\nA common language that all systems seek.\nThe Collector waits, a sentinel so grand,\nTo process, batch, and send across the land.\n\nNo vendor binds, no proprietary chain,\nBut open standards, free from selfish gain.\nA universal tongue, for insight to arise,\nReflecting truth before observing eyes.\n\nFrom scattered signals, patterns now emerge,\nTo quell the chaos, stem the error's surge.\nWith deep observability, we understand,\nThe intricate dance across the digital sand.\n\nSo hail this standard, built with thoughtful grace,\nTo light the corners of computing space.\nOpenTelemetry, 
a steadfast, vital guide,\nWhere understanding and precision ride." + "text": "When systems grow, a tangled, vast domain,\nWhere microservices dance, through sun and rain,\nA shadow falls, where logic hides its plea,\nAnd whispers vanish, lost for all to see.\n\nThen from the cloud, a guiding star takes flight,\nDispelling gloom with its clear, steady light.\nOpenTelemetry, the name we praise,\nUnifying vision through digital haze.\n\nA golden thread, a trace begins to gleam,\nThrough every service, like a waking dream.\nWith parent-child spans, a timeline clear,\nWhat once was hidden, now begins to appear.\n\nThe metrics flow, a system's vital pulse,\nWith counters climbing, no evasive impulse.\nGauges ascend and dip, where loads reside,\nThe heartbeat known, with nowhere left to hide.\n\nAnd logs appear, with context richly cast,\nEach crucial event, for future and for past.\nAttached to spans, a detailed, storied chime,\nUnveiling insights, lost to passing time.\n\nThrough RPC calls, and message queues that bind,\nContext propagates, for every searching mind.\nA hidden thread, connecting all the parts,\nRevealing patterns, igniting thoughtful starts.\n\nNo vendor lock, no proprietary chain,\nYour data streams, through sunshine and through rain.\nTo chosen backends, it will freely roam,\nObservability, now finds its rightful home.\n\nSo hail to Otel, the standard, strong and true,\nBringing clarity, for all our dev crews.\nFrom dark unknowns, to light, a clear display,\nOpenTelemetry illuminates the way." 
} ] }, "finishReason": "STOP", - "avgLogprobs": -2.298871321801128 + "avgLogprobs": -2.441835476585546 } ], "usageMetadata": { "promptTokenCount": 8, - "candidatesTokenCount": 349, - "totalTokenCount": 2326, + "candidatesTokenCount": 339, + "totalTokenCount": 2639, "trafficType": "ON_DEMAND", "promptTokensDetails": [ { @@ -66,14 +66,14 @@ interactions: "candidatesTokensDetails": [ { "modality": "TEXT", - "tokenCount": 349 + "tokenCount": 339 } ], - "thoughtsTokenCount": 1969 + "thoughtsTokenCount": 2292 }, "modelVersion": "gemini-2.5-flash", - "createTime": "2025-10-06T15:50:52.185046Z", - "responseId": "XOXjaNalC4-nnvgPsaPcuAw" + "createTime": "2025-10-10T15:56:16.631749Z", + "responseId": "oCzpaMXHJo_B2PgPq7j_8AY" } headers: Accept-Ranges: @@ -81,7 +81,7 @@ interactions: Content-Type: - application/json; charset=UTF-8 Date: - - Mon, 06 Oct 2025 15:51:05 GMT + - Fri, 10 Oct 2025 15:56:30 GMT Server: - scaffolding on HTTPServer2 Transfer-Encoding: diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml deleted file mode 100644 index 5f8608e42d..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml +++ /dev/null @@ -1,102 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - accept: - - '*/*' - accept-encoding: - - identity - connection: - - keep-alive - content-length: - - '92' - content-type: - - application/json - host: - - us-central1-aiplatform.googleapis.com - user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "In realms where code profoundly weaves its might,\nAnd services converse in hurried streams,\nA common tongue emerges, clear and bright,\nTo shed its light upon our digital dreams.\n\nNo longer hidden, lost in labyrinthine ways,\nThe secret life of systems, now revealed,\nThrough OpenTelemetry's enlightened gaze,\nA universal language, fairly wielded.\n\nA **Trace** begins, a journey's golden thread,\nFrom distant client to the deepest core,\nEach tiny \"span,\" a word that has been said,\nMapping the path, from shore to server's shore.\nContext propagated, like a gentle breeze,\nAcross the network, through the busy maze,\nConnecting dots with elegant, calm ease,\nUnveiling time, in swift or lingering phases.\n\nThen **Metrics** pulse, a steady, rhythmic beat,\nThe CPU's hum, the memory's steady climb,\nLatency's dance, both bitter and so sweet,\nA quantitative pulse, defying time.\nCounters tally, gauges hold their ground,\nHistograms paint distributions, clear and bold,\nThe health of systems, constantly profound,\nA silent story, in their numbers told.\n\nAnd finally, the **Logs**, a detailed scroll,\nEach event marked, a narrative precise,\nWhat happened where, beyond our own control,\nA key to debug, a moment's wise advice.\nStructured records, searchable and plain,\nThey whisper errors, or triumphant news,\nA 
chronological, unwavering refrain,\nUnlocking insights, banishing confusions.\n\nFrom humble agents, nestled in the code,\nTo collectors standing, vigilant and keen,\nThe data flows, a precious, heavy load,\nTo any backend, for display and screen.\nVendor-neutral, open, strong, and free,\nIt breaks the silos, tears the walls apart,\nFor true observability's decree,\nTo understand the system's beating heart.\n\nSo hail this standard, built with thoughtful hand,\nFor those who build, and those who seek to mend,\nOpenTelemetry, across the land,\nOur guiding light, until the journey's end." - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -2.4816789437603477 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 453, - "totalTokenCount": 2468, - "trafficType": "ON_DEMAND", - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 453 - } - ], - "thoughtsTokenCount": 2007 - }, - "modelVersion": "gemini-2.5-flash", - "createTime": "2025-10-06T15:50:35.682132Z", - "responseId": "S-XjaJTRKdO6nvgPnMuyoQs" - } - headers: - Accept-Ranges: - - none - Content-Type: - - application/json; charset=UTF-8 - Date: - - Mon, 06 Oct 2025 15:50:51 GMT - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - X-Origin - - Referer - - Origin,Accept-Encoding - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml similarity index 54% rename from 
instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml rename to instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml index 09a734361f..31b149d902 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml @@ -27,7 +27,7 @@ interactions: host: - us-central1-aiplatform.googleapis.com user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 + - google-genai-sdk/1.32.0 gl-python/3.10.18 x-goog-api-client: - x-goog-user-project: @@ -44,18 +44,18 @@ interactions: "role": "model", "parts": [ { - "text": "In lands of code, where microservices roam,\nAnd hidden pathways lead our data home,\nWhere errors lurk and mysteries reside,\nA guiding light we desperately confide.\n\nThen from the open source, a beacon gleams,\nUnifying visions, waking dormant dreams.\nNo longer bound by vendor's watchful hand,\nOpenTelemetry, across the digital land.\n\nIt weaves the **Traces**, threads of golden light,\nThrough every service, in the darkest night.\nEach **Span** a step, a function's brief embrace,\nRevealing journeys, time, and hidden space.\n\nThen **Metrics** rise, in counters and in gauges,\nPerformance stories, told on countless pages.\nCPU hum, latency's slow tide,\nThe health of systems, nowhere left to hide.\n\nAnd **Logs**, the whispers from each busy node,\nA chronicle of actions, burdens freely showed.\nFrom info messages to errors stark and plain,\nContextual wisdom, banishing the pain.\n\nWith SDKs and agents, code aware it 
grows,\nThrough sturdy Collectors, the data swiftly flows.\nTo chosen backends, be they new or old,\nA unified stream, a story to unfold.\n\nNo longer blind, to whispers in the dark,\nWe find the culprits, leaving their dark mark.\nUnderstand the flow, the logic and the strain,\nAnd build with confidence, again and again.\n\nSo hail the standard, vibrant, strong, and free,\nFor every system, for you, for me.\nOpenTelemetry, a power understood,\nIlluminating darkness, for the common good." + "text": "In tangled webs of code, where logic spins,\nA silent plea for sight, when trouble wins.\nDistributed dreams, where microservices hum,\nCan hide the fault, from whence the failures come.\n\nThen from the void, a beacon starts to gleam,\nOpenTelemetry, a developer's dream.\nA standard hand, a vendor-neutral plea,\nTo weave the fabric of observability.\n\nIt paints the journey, step by fragile step,\nA request's life, where secrets often slept.\nEach function call, a \"span\" upon the thread,\nConnecting dots, where errors might have spread.\n\nThe system's heartbeat, captured, clear, and bright,\nIn counters climbing, through the digital night.\nGauges that measure, histograms that show,\nThe ebb and flow, of where the currents go.\n\nAnd logs, the narratives, of every deed,\nA detailed story, planted like a seed.\nContext attached, for clarity and grace,\nTo pinpoint flaws, and put them in their place.\n\nNo vendor lock, no proprietary chain,\nJust open arms, to ease the developer's pain.\nA unified approach, for all to see,\nThe core of insight, growing strong and free.\n\nThrough careful pipelines, data starts to flow,\nTo trusted backends, where the insights grow.\nNo matter where your services reside,\nIts watchful eye, has nowhere left to hide.\n\nSo hail the standard, comprehensive, vast,\nA future built, where understanding's cast.\nFor complex systems, now a guiding light,\nOpenTelemetry, makes everything bright." 
} ] }, "finishReason": "STOP", - "avgLogprobs": -1.8066022087545957 + "avgLogprobs": -1.920527738683364 } ], "usageMetadata": { "promptTokenCount": 8, "candidatesTokenCount": 340, - "totalTokenCount": 2275, + "totalTokenCount": 2040, "trafficType": "ON_DEMAND", "promptTokensDetails": [ { @@ -69,11 +69,11 @@ interactions: "tokenCount": 340 } ], - "thoughtsTokenCount": 1927 + "thoughtsTokenCount": 1692 }, "modelVersion": "gemini-2.5-flash", - "createTime": "2025-10-06T15:50:08.533649Z", - "responseId": "MOXjaJHJIPP02PgP1dnn0Qo" + "createTime": "2025-10-10T15:56:04.095069Z", + "responseId": "lCzpaN3mBYqCnvgP5dCT6AU" } headers: Accept-Ranges: @@ -81,7 +81,7 @@ interactions: Content-Type: - application/json; charset=UTF-8 Date: - - Mon, 06 Oct 2025 15:50:20 GMT + - Fri, 10 Oct 2025 15:56:14 GMT Server: - scaffolding on HTTPServer2 Transfer-Encoding: diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml deleted file mode 100644 index e717acd482..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml +++ /dev/null @@ -1,102 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - accept: - - '*/*' - accept-encoding: - - identity - connection: - - keep-alive - content-length: - - '92' - content-type: - - application/json - host: - - us-central1-aiplatform.googleapis.com - user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "In realms of code, where microservices gleam,\nA hidden flow, a silent, complex stream.\nThe journey lost, where data takes its flight,\nA developer's plea for dawning light.\n\nThen from the cloud, a standard takes its stand,\nWith open heart, across the digital land.\nOpenTelemetry, a beacon strong and true,\nTo pierce the fog, and bring the facts to view.\n\nA Trace begins, a story to unfold,\nThrough every service, brave and strong and bold.\nWith Spans entwined, a parent and its child,\nThe call graph painted, nevermore beguiled.\n\nThen Metrics rise, in numbers clear and plain,\nThe count of errors, or the joyful gain.\nA gauge of load, a histogram's wide spread,\nThe health of systems, clearly to be read.\n\nAnd Logs arrive, with context rich and deep,\nThe structured message, secrets they don't keep.\nEach line a moment, with data to attest,\nA vital clue, to pass the crucial test.\n\nNo vendor lock, no proprietary chain,\nBut open hands, a collaborative domain.\nA unified approach, for all to understand,\nThe data flowing, free across the land.\n\nWith SDKs bright, in languages diverse,\nYou instrument your code, a crucial verse.\nAttach the agents, where the queries run,\nThe observation journey, has begun.\n\nThe Collector waits, a gatherer so keen,\nTo process data, clean and sharp and lean.\nTransform and 
batch, then send it far away,\nTo chosen backend, come what may.\n\nSo dark no longer, where the errors hide,\nBut clarity shines, your systems open wide.\nThe 'why' revealed, the latency explained,\nWith OTel's power, mastery is gained.\n\nFor engineers who seek to truly see,\nThe inner workings, wild and fiercely free.\nEmbrace the standard, let its power ignite,\nOpenTelemetry, your guiding, digital light." - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -1.5871007582720589 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 425, - "totalTokenCount": 2470, - "trafficType": "ON_DEMAND", - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 425 - } - ], - "thoughtsTokenCount": 2037 - }, - "modelVersion": "gemini-2.5-flash", - "createTime": "2025-10-06T15:50:21.557646Z", - "responseId": "PeXjaM6EItur3NoPkYPI2A4" - } - headers: - Accept-Ranges: - - none - Content-Type: - - application/json; charset=UTF-8 - Date: - - Mon, 06 Oct 2025 15:50:34 GMT - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - X-Origin - - Referer - - Origin,Accept-Encoding - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml new file mode 100644 index 0000000000..ad43798dca --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml @@ -0,0 
+1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In systems vast, where services reside,\nA hidden flow, where secrets often hide.\nWhen errors bloom or latency takes hold,\nWe seek the story, yet untold.\n\nThen from the void, a beacon bright appears,\nDispelling shadows, calming all our fears.\n'Tis OpenTelemetry, a guiding hand,\nTo understand the workings of our land.\n\nA standard born, for code to freely speak,\nThe truths our tangled systems always seek.\nWith instrumentation, libraries we weave,\nA common language, all can now believe.\n\nEach call, each task, a journey, swift or slow,\nA **Trace** it paints, to show us where to go.\nWith nested **Spans**, a parent-child embrace,\nWe see the steps, the timing, and the space.\nFrom front-end click to database's deep hum,\nThe full request path, clearly overcome.\n\nThen **Metrics** rise, in streams precise and true,\nCPU, memory, requests old and new.\nCounters climb, and gauges ebb and flow,\nThe system's pulse, a steady, measured glow.\nNo guesswork now, but data, firm and clear,\nPerformance trends, throughout the day and year.\n\nAnd **Logs**, once scattered, now aligned with care,\nContext linking, everywhere, everywhere.\nA unique ID, through layers it will 
thread,\nConnecting output, words that once were dead.\nFrom single events, a narrative we glean,\nThe bigger picture, beautifully seen.\n\nNo vendor lock, no proprietary chain,\nJust open standards, easing every pain.\nThe Collector waits, a gatherer so grand,\nTo process data, from every digital hand.\nThen send it forth, to chosen backend store,\nFor insights deep, forevermore.\n\nFrom microservices to serverless flight,\nIt brings the dark domains into the light.\nA tapestry woven, of signals three,\nFor clarity and deep observability.\nSo hail to OTel, the unifying way,\nThat turns confusion into clear display." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -1.8014850572405456 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 433, + "totalTokenCount": 1918, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 433 + } + ], + "thoughtsTokenCount": 1477 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-10T15:55:50.511529Z", + "responseId": "hizpaKmcH9qs698P85HHgAU" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Fri, 10 Oct 2025 15:56:02 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml new file mode 100644 index 
0000000000..a26a593eed --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-default].yaml @@ -0,0 +1,116 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In tangled webs of cloud and code,\\nWhere microservices softly\ + \ strode,\\nA whispered secret, hard to find,\\nThe journey of a user's mind.\\\ + nA black box hummed, a mystery deep,\\nWhile crucial insights lay asleep.\\\ + n\\nBut then a vision, clear and bright,\\nE\"}]}}],\"usageMetadata\": {\"\ + trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\"\ + : \"2025-10-10T15:57:26.830111Z\",\"responseId\": \"5izpaJ_VMqednvgP2KWkqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"merged to chase away the night.\\n**OpenTelemetry**, a guiding\ + \ star,\\nTo show us where our problems are.\\nA standard banner, raised with\ + \ pride,\\nWhere all observability can ride.\\n\\nThrough **Traces**, golden\ + \ threads unfurl,\\nConnecting paths across the world.\\nFrom start to end,\ + \ a user'\"}]}}],\"usageMetadata\": {\"trafficType\": 
\"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:26.830111Z\",\"\ + responseId\": \"5izpaJ_VMqednvgP2KWkqQ8\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \"s plea,\\nA chain\ + \ of **Spans**, for all to see.\\nContext propagated, strong and true,\\nRevealing\ + \ what each service _do_.\\n\\nAnd **Metrics**, count and watch and chart,\\\ + nThe system's pulse, its beating heart.\\nLatency's grace, or error's bite,\\\ + nThrough\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:26.830111Z\",\"\ + responseId\": \"5izpaJ_VMqednvgP2KWkqQ8\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \" gauges, sums, and\ + \ counters bright.\\nThe CPU's hum, the memory's strain,\\nMade visible, again,\ + \ again.\\n\\nWith **Logs**, the whispered tales are told,\\nEach event, a\ + \ story to unfold.\\nStructured lines, with context deep,\\nThe secrets that\ + \ the services keep.\\nCor\"}]}}],\"usageMetadata\": {\"trafficType\": \"\ + ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:26.830111Z\"\ + ,\"responseId\": \"5izpaJ_VMqednvgP2KWkqQ8\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"related, timestamped,\ + \ and clear,\\nDispelling doubt, and calming fear.\\n\\nNo vendor lock, no\ + \ walled-off maze,\\nJust open standards, through the haze.\\nA common language,\ + \ strong and free,\\nFor all your signals, *three-in-three*.\\nFrom instrumenting,\ + \ to\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:26.830111Z\",\"\ + responseId\": \"5izpaJ_VMqednvgP2KWkqQ8\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \" collect and send,\\\ 
+ nA journey without a bitter end.\\n\\nIt gathers data, rich and wide,\\nFrom\ + \ every corner, deep inside.\\nThen sends it forth, for you to glean,\\nThe\ + \ health and state of each machine.\\nTo troubleshoot fast, with knowing eyes,\\\ + nAnd make your complex system rise.\\n\\nSo\"}]}}],\"usageMetadata\": {\"\ + trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\"\ + : \"2025-10-10T15:57:26.830111Z\",\"responseId\": \"5izpaJ_VMqednvgP2KWkqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" hail to OTel, the guiding hand,\\nThat brings observability\ + \ to command.\\nA unified vision, strong and true,\\nFor clarity, for me,\ + \ for you!\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\"\ + : 8,\"candidatesTokenCount\": 437,\"totalTokenCount\": 2472,\"trafficType\"\ + : \"ON_DEMAND\",\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\"\ + : 8}],\"candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\"\ + : 437}],\"thoughtsTokenCount\": 2027},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-10T15:57:26.830111Z\",\"responseId\": \"5izpaJ_VMqednvgP2KWkqQ8\"\ + }\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Fri, 10 Oct 2025 15:57:39 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml 
deleted file mode 100644 index b09dd31662..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml +++ /dev/null @@ -1,102 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - accept: - - '*/*' - accept-encoding: - - identity - connection: - - keep-alive - content-length: - - '92' - content-type: - - application/json - host: - - us-central1-aiplatform.googleapis.com - user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"In realms of code, where microservices bloom,\\nA tangled\ - \ web, obscured by digital gloom.\\nWhen errors hide, and latency takes hold,\\\ - nA story lost, a mystery untold.\\n\\nThen from the depths, a guiding light\ - \ appears,\\nTo banish doubt and quell developers' fears.\\n**OpenTelemetry**,\"\ - }]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ - : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"\ - responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\": [{\"\ - content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a standard, bold\ - \ and free,\\nUnifying sight for all the world to see.\\n\\nThrough vast distributed\ - \ lands, it weaves a thread,\\nFrom start to finish, where each packet's led.\\\ - nWith **spans** and parentage, a call stack clear,\\nThe journey's path, dismissing\ - \ every fear.\\n\\n\"}]}}],\"usageMetadata\": 
{\"trafficType\": \"ON_DEMAND\"\ - },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\"\ - ,\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"Then **metrics**\ - \ rise, a pulse, a steady beat,\\nOf CPU, memory, and every server's heat.\\\ - nLatency's dance, throughput's flowing tide,\\nIn charts and graphs, where\ - \ vital truths reside.\\n\\nAnd **logs**, the tales of action, word by word,\\\ - n\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ - : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"\ - responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\": [{\"\ - content\": {\"role\": \"model\",\"parts\": [{\"text\": \"Each event recorded,\ - \ faithfully heard.\\n**Context attached**, correlation strong,\\nWhere truth\ - \ unfolds, and nothing can go wrong.\\n\\nNo vendor lock, no proprietary chain,\\\ - nJust **open standards**, easing every pain.\\nWith SDKs to **instrument**\ - \ your plea,\\nAnd **collectors** gathering, for all to see.\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\n\\nSo clarity descends, where once was night,\\nA system's\ - \ health, revealed in shining light.\\nFor dev and ops, a shared and common\ - \ tongue,\\n**OpenTelemetry**, its virtues widely sung.\"}]},\"finishReason\"\ - : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ - : 309,\"totalTokenCount\": 1779,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 
309}],\"thoughtsTokenCount\": 1462},\"\ - modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\"\ - ,\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Date: - - Mon, 06 Oct 2025 15:51:52 GMT - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml new file mode 100644 index 0000000000..674d9ebc64 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml @@ -0,0 +1,109 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In digital realms, where systems intertwine,\\nA complex dance,\ + \ a labyrinthine design.\\nMicroservices hum, a scattered, vast array,\\nWhere\ + \ issues hide, and secrets hold their sway.\\n\\nBut darkness lifts, a beacon\ + \ takes its stand,\\nA guiding star, across the digital land.\\n**OpenTelemetry**,\"\ + }]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:12.055169Z\",\"\ + responseId\": \"2CzpaIGvA4C4nvgPk77D6Ak\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \" its noble, open\ + \ name,\\nTo cast out shadows, stoke the insights' flame.\\n\\nIt tracks each\ + \ path, a thread of light so fine,\\nFrom user's click to logic deep inside.\\\ + nWith **Traces** woven, spans that link and blend,\\nThe journey's story,\ + \ from its start to end\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:12.055169Z\"\ + ,\"responseId\": \"2CzpaIGvA4C4nvgPk77D6Ak\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \".\\n\\nThen\ + \ **Metrics** rise, in numbers, stark and clear,\\nThe pulse, the health,\ + \ banishing all fear.\\nCounters grow, gauges 
shift and fall,\\nA system's\ + \ heartbeat, answering every call.\\n\\nAnd **Logs** emerge, with details\ + \ rich and deep,\\nEvents recorded, secrets they\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-10T15:57:12.055169Z\",\"responseId\": \"2CzpaIGvA4C4nvgPk77D6Ak\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" will keep.\\nContext bound, to trace and metric too,\\nA\ + \ holistic view, for all the world to view.\\n\\nNo vendor chains, no silos,\ + \ built so high,\\nA standard set, beneath the open sky.\\nPortable data,\ + \ free to flow and roam,\\nTo any backend, finding its true\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-10T15:57:12.055169Z\",\"responseId\": \"2CzpaIGvA4C4nvgPk77D6Ak\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" home.\\n\\nThrough SDKs and agents, smart and keen,\\nIt\ + \ weaves its magic, often quite unseen.\\nInstrumentation, gentle, firm, and\ + \ bright,\\nBringing clarity to the darkest night.\\n\\nSo understand your\ + \ code, its ebb and flow,\\nThe why, the how, the seeds that errors sow.\\\ + n**\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:12.055169Z\",\"\ + responseId\": \"2CzpaIGvA4C4nvgPk77D6Ak\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \"OpenTelemetry**,\ + \ a truth-revealing art,\\nTo build with wisdom, and a confident heart.\"\ + }]},\"finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\"\ + : 8,\"candidatesTokenCount\": 354,\"totalTokenCount\": 2064,\"trafficType\"\ + : \"ON_DEMAND\",\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\"\ + : 8}],\"candidatesTokensDetails\": 
[{\"modality\": \"TEXT\",\"tokenCount\"\ + : 354}],\"thoughtsTokenCount\": 1702},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-10T15:57:12.055169Z\",\"responseId\": \"2CzpaIGvA4C4nvgPk77D6Ak\"\ + }\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Fri, 10 Oct 2025 15:57:22 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml deleted file mode 100644 index 4f98f0102d..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml +++ /dev/null @@ -1,109 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - accept: - - '*/*' - accept-encoding: - - identity - connection: - - keep-alive - content-length: - - '92' - content-type: - - application/json - host: - - us-central1-aiplatform.googleapis.com - user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"In realms of code, where logic weaves its way,\\nAnd microservices\ - \ in silent dance hold sway,\\nA shadowed maze, where errors often hide,\\\ - nAnd mysteries within the darkness ride.\\n\\nThen from the cloud, a guiding\ - \ star takes flight,\\nOpenTelemetry, bringing forth the light.\\nA universal\ - \ tongue, for systems\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ - },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ - ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" to embrace,\\\ - nTo chart the journey, through time and space.\\n\\nIt paints the **Traces**,\ - \ a thread of golden gleam,\\nEach request's passage, a detailed, vital dream.\\\ - nFrom source to endpoint, through every hop it flows,\\nThe parent-child relationships,\ - \ every service knows.\\n\\nThen\"}]}}],\"usageMetadata\": {\"trafficType\"\ - : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ - 2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" **Metrics** rise, with numbers sharp and clear,\\nThe CPU's\ - \ heartbeat, the latency we 
fear.\\nCounts, gauges, histograms, precise and\ - \ true,\\nPerformance secrets, brought right into view.\\n\\nAnd **Logs**,\ - \ the tales each crucial moment tells,\\nContextual details, from the deepest\ - \ cells.\\nWith\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ - },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ - ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" trace IDs\ - \ tied, a story now complete,\\nNo isolated entry, bitter-sweet.\\n\\nThrough\ - \ agents calm, the Collector takes its stand,\\nGathering wisdom, from every\ - \ corner of the land.\\nVendor-neutral standard, a freedom it bestows,\\nOn\ - \ how to see, and where the knowledge goes.\\n\\nNo\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" longer guessing, in the dark we grope,\\nBut paths illuminated,\ - \ sparking future hope.\\nFor engineers and SREs, a tool so grand,\\nTo understand\ - \ the landscape, fully in command.\\n\\nSo hail to OTel, its vision strong\ - \ and wide,\\nOur system's heartbeat, can no longer hide\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \".\\nA canvas drawn, where every pixel gleams,\\nThe living\ - \ tapestry of all our digital dreams.\"}]},\"finishReason\": \"STOP\"}],\"\ - usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\": 353,\"\ - totalTokenCount\": 1932,\"trafficType\": 
\"ON_DEMAND\",\"promptTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 353}],\"thoughtsTokenCount\": 1571},\"\ - modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ - ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Date: - - Mon, 06 Oct 2025 15:51:41 GMT - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml new file mode 100644 index 0000000000..d66466a25d --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-default].yaml @@ -0,0 +1,96 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In digital realms, where tangled systems weave,\\nAnd microservices\ + \ hum, our trust they receive,\\nA complex dance of logic, fast and grand,\\\ + nTo truly see their heart, a guiding hand.\\n\\nFor this, Open Telemetry takes\ + \ its stand,\\nA common tongue across the digital land.\\nWith\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-10T15:57:01.802582Z\",\"responseId\": \"zSzpaJb-MPXZnvgPisGa2A0\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" **Traces**, pathways clear, from start to end they sweep,\\\ + nUnveiling journeys where the secrets sleep.\\n\\nAnd **Metrics** rise, in\ + \ numbers, stark and true,\\nCPU's soft hum, latency's quick, measured queue.\\\ + nWhile **Logs** recount each moment, event, and\"}]}}],\"usageMetadata\":\ + \ {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-10T15:57:01.802582Z\",\"responseId\": \"zSzpaJb-MPXZnvgPisGa2A0\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" plight,\\nIlluminating shadows with their burning light.\\\ + n\\nNo longer bound by proprietary sway,\\nIt gathers data, come what vendor\ + \ may.\\nAn open 
hand, a standard, strong and free,\\nFor every language,\ + \ for each API.\\n\\nFrom failing links to bottlenecks unseen,\\nIt paints\ + \ the picture, vibrant\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:01.802582Z\"\ + ,\"responseId\": \"zSzpaJb-MPXZnvgPisGa2A0\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" and serene.\\\ + nEmpowering insight, from the core to edge,\\nA clear advantage, every silent\ + \ pledge.\\n\\nSo hail the standard, unifying and bright,\\nThat brings our\ + \ complex systems into light.\\nOpen Telemetry, a bridge across the maze,\\\ + nUnlocking wisdom in these data-driven days.\"}]},\"finishReason\": \"STOP\"\ + }],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\": 257,\"\ + totalTokenCount\": 1522,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 257}],\"thoughtsTokenCount\": 1257},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:57:01.802582Z\"\ + ,\"responseId\": \"zSzpaJb-MPXZnvgPisGa2A0\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Fri, 10 Oct 2025 15:57:09 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml deleted file mode 100644 index e06f1689d3..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml +++ /dev/null @@ -1,123 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - accept: - - '*/*' - accept-encoding: - - identity - connection: - - keep-alive - content-length: - - '92' - content-type: - - application/json - host: - - us-central1-aiplatform.googleapis.com - user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"In realms of code, where microservices roam,\\nA digital maze,\ - \ far from a single home.\\nWhere calls interweave, and logic softly hums,\\\ - nHow know the path, when trouble swiftly comes?\\nA black box deep, with secrets\ - \ held inside,\\nWhere errors lurk, and issues try to hide\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \".\\n\\nThen a beacon shines, a guiding, steady hand,\\nA common\ - \ tongue, across the digital land.\\n**OpenTelemetry**, its promise clear,\ - \ for all to see,\\nA standard set, for 
observability.\\nNo vendor lock, but\ - \ freedom's open gate,\\nTo understand, before it is too late.\\n\\n\"}]}}],\"\ - usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"A **trace** begins, a thread of light unfurled,\\nThrough\ - \ services vast, across the cyber world.\\nEach interaction, a **span** along\ - \ the way,\\nFrom user's click, to final, rendered day.\\nA parent's call,\ - \ a child's reply so swift,\\nA\"}]}}],\"usageMetadata\": {\"trafficType\"\ - : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ - 2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" narrative woven, an insightful gift.\\n\\nThe **metrics**\ - \ hum, the system's steady beat,\\nA pulse of health, or looming, cold defeat.\\\ - nCPU's quick breath, memory's deep embrace,\\nRequest rates soar, or slow\ - \ to crawling pace.\\nCounters tick, and gauges softly sway,\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\nRevealing truths, at close of every day.\\n\\nAnd **logs**\ - \ record, the whispers from the core,\\nEach critical event, and nothing less,\ - \ or more.\\nA timestamped tale, of what transpired within,\\nWhere errors\ - \ bloomed, or victories did win.\\nThe full context shared, with every\"}]}}],\"\ - usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": 
\"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" single line,\\nA detailed story, perfectly aligned.\\n\\\ - nThrough **context propagation**, threads entwine,\\nAcross boundaries, making\ - \ the design\\nCoherent, whole, where once were gaping cracks,\\nThe full\ - \ transaction, on its varied tracks.\\nThe **Collector** stands, a guardian\ - \ at the gate,\\nTo gather\"}]}}],\"usageMetadata\": {\"trafficType\": \"\ - ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:18.506243Z\"\ - ,\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" streams, before\ - \ it is too late.\\nTransform, enrich, then send where insights bloom,\\nDispelling\ - \ shadows, chasing back the gloom.\\n\\nSo gaze upon the truth, in light so\ - \ clear,\\nNo longer guessing, banishing all fear.\\nWith OpenTelemetry, the\ - \ path is known,\\nThe digital future, gracefully is sown\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \".\\nFor those who build, and those who bravely run,\\nA deeper\ - \ vision, 'til the work is done.\\nThe complexity tamed, the chaos brought\ - \ to heel,\\nThe heart of systems, finally revealed.\"}]},\"finishReason\"\ - : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ - : 514,\"totalTokenCount\": 2270,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 514}],\"thoughtsTokenCount\": 1748},\"\ - modelVersion\": 
\"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:18.506243Z\"\ - ,\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"}\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Date: - - Mon, 06 Oct 2025 15:51:29 GMT - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml new file mode 100644 index 0000000000..25d9819020 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-default].yaml @@ -0,0 +1,110 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"When systems hum, a silent, complex art,\\nWith hidden errors\ + \ playing their part,\\nA need arose, a vision clear and bold,\\nFor stories\ + \ of our code, bravely told.\\n\\nOpenTelemetry, the name we praise,\\nIlluminating\ + \ all our digital haze.\\nIt brings observability to light,\\nTurning darkness\"\ + }]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:56:46.229905Z\",\"\ + responseId\": \"vizpaJGEDvXZnvgPisGa2A0\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \" into vibrant sight.\\\ + n\\nFirst, **Traces** flow, a thread so fine,\\nThrough service calls, a clear\ + \ design.\\nEach span a step, from start to end,\\nA journey mapped, a faithful\ + \ friend.\\nThey show the path, where data goes astray,\\nAnd pinpoint latency,\ + \ come what may.\\n\\n\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:56:46.229905Z\"\ + ,\"responseId\": \"vizpaJGEDvXZnvgPisGa2A0\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"Then **Metrics**\ + \ rise, in steady stream,\\nOf counters, gauges, like a dream.\\nThey show\ + \ us load, and throughput's 
grace,\\nThe pulse and rhythm of the place.\\\ + nCPU usage, memory's hold,\\nPerformance tales, in numbers told.\\n\\nWhile\ + \ **Logs** detail, each critical event,\"}]}}],\"usageMetadata\": {\"trafficType\"\ + : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ + 2025-10-10T15:56:46.229905Z\",\"responseId\": \"vizpaJGEDvXZnvgPisGa2A0\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\nWith context rich, on purpose sent.\\nFor every error,\ + \ warning, or success,\\nThey fill the gaps, with clear address.\\nStructured\ + \ entries, timestamped and deep,\\nSecrets that our applications keep.\\n\\\ + nFrom code embedded, agent deployed,\\nData's collected, gaps avoided.\\nTo\ + \ open standards,\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:56:46.229905Z\"\ + ,\"responseId\": \"vizpaJGEDvXZnvgPisGa2A0\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" freely bound,\\\ + nNo vendor lock-in can be found.\\nA common language, understood by all,\\\ + nAnswering the complex system's call.\\n\\nFor troubleshooting, deep and wide,\\\ + nWhere hidden problems used to hide,\\nWe find the root, with swift command,\\\ + nAnd build robust systems,\"}]}}],\"usageMetadata\": {\"trafficType\": \"\ + ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:56:46.229905Z\"\ + ,\"responseId\": \"vizpaJGEDvXZnvgPisGa2A0\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" close at hand.\\\ + nA tapestry of insight, woven bright,\\nGuiding our development day and night.\\\ + n\\nA unified vision, strong and free,\\nFor modern systems' destiny.\\nOpenTelemetry,\ + \ a guiding light,\\nMakes complex software shine so bright.\"}]},\"finishReason\"\ + : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 
8,\"candidatesTokenCount\"\ + : 388,\"totalTokenCount\": 2589,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 388}],\"thoughtsTokenCount\": 2193},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-10T15:56:46.229905Z\"\ + ,\"responseId\": \"vizpaJGEDvXZnvgPisGa2A0\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Fri, 10 Oct 2025 15:56:58 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml deleted file mode 100644 index 8b63b05da6..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml +++ /dev/null @@ -1,109 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - accept: - - '*/*' - accept-encoding: - - identity - connection: - - keep-alive - content-length: - - '92' - content-type: - - application/json - host: - - us-central1-aiplatform.googleapis.com - user-agent: - - google-genai-sdk/1.32.0 gl-python/3.11.9 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"In labyrinthine code, where shadows creep,\\nA million microservices,\ - \ secrets keep.\\nPerformance woes, a phantom's silent hand,\\nAcross the\ - \ digital, a darkened land.\\n\\nThen from the mist, a beacon starts to gleam,\\\ - nOpenTelemetry, a waking dream.\\nA common tongue, for data,\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" vast and deep,\\nThe promises of clarity to keep.\\n\\nThe\ - \ trace unfurls, a golden thread so fine,\\nFrom user click, to database divine.\\\ - nEach span a step, a function's hurried breath,\\nUnveiling latency, and coding's\ - \ death.\\n\\nWith metrics gathered, pulses start to\"}]}}],\"usageMetadata\"\ - : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" beat,\\nCPU hums, and memory's hot seat.\\nRequests per second,\ - \ errors on the rise,\\nA living dashboard, open to our eyes.\\n\\nAnd humble\ - \ 
logs, with details stark and clear,\\nEvents recorded, banishing all fear.\\\ - nThe story told, of what transpired within,\\nA\"}]}}],\"usageMetadata\":\ - \ {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ - ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" whispered secret, where issues begin.\\n\\nThrough context\ - \ woven, links are forged anew,\\nA tapestry of truth, for me and you.\\nNo\ - \ isolated fragment, lost and lone,\\nBut part of history, clearly shown.\\\ - n\\nThe root cause found, where once was dark despair,\\nPerformance tuned,\ - \ beyond all earthly care\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ - },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:06.600201Z\"\ - ,\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \".\\nA system\ - \ known, its whispers and its roar,\\nObservability, forevermore.\\n\\nNo\ - \ vendor locked, no single path to take,\\nAn open standard, for all our systems'\ - \ sake.\\nA community's embrace, robust and wide,\\nWhere wisdom flows, with\ - \ nothing left to hide.\\n\\nSo\"}]}}],\"usageMetadata\": {\"trafficType\"\ - : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ - 2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" hail the tools, that bring the light to bear,\\nOn tangled\ - \ webs, and burdens we must share.\\nOpenTelemetry, a guide so true,\\nRevealing\ - \ insights, shining ever new.\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\ - : {\"promptTokenCount\": 8,\"candidatesTokenCount\": 370,\"totalTokenCount\"\ - : 1904,\"trafficType\": 
\"ON_DEMAND\",\"promptTokensDetails\": [{\"modality\"\ - : \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\"\ - : \"TEXT\",\"tokenCount\": 370}],\"thoughtsTokenCount\": 1526},\"modelVersion\"\ - : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"\ - responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"}\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Date: - - Mon, 06 Oct 2025 15:51:16 GMT - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml new file mode 100644 index 0000000000..60077ccb2c --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a haiku about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '93' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "Open data streams,\nMetrics, logs, and traces flow,\nClearly see inside." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -5.934557172987196 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 18, + "totalTokenCount": 459, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 18 + } + ], + "thoughtsTokenCount": 433 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-10T16:32:45.350496Z", + "responseId": "LTXpaKCyFdPlnvgPuajSiQQ" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Fri, 10 Oct 2025 16:32:59 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml new file mode 100644 index 0000000000..53b23d8161 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a haiku about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '93' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "Open data streams,\nMetrics, logs, and traces flow,\nClearly see inside." 
+ } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -28.687652587890625 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 20, + "totalTokenCount": 1341, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 20 + } + ], + "thoughtsTokenCount": 1313 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-10T16:32:34.499409Z", + "responseId": "IjXpaNG9HoC4nvgPk77D6Ak" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Fri, 10 Oct 2025 16:32:42 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml new file mode 100644 index 0000000000..31af66797a --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async-enable_completion_hook-experimental].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a haiku about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '93' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "Open data streams,\nMetrics, logs, and traces flow,\nClearly see inside." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -6.327557373046875 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 20, + "totalTokenCount": 475, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 20 + } + ], + "thoughtsTokenCount": 447 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-10T16:32:28.805463Z", + "responseId": "HDXpaNeUMaeBnvgPiJ6P-Ao" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Fri, 10 Oct 2025 16:32:31 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml new file mode 100644 index 0000000000..1c0a0275cc --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_upload_hook_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync-enable_completion_hook-experimental].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a haiku about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '93' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.10.18 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "Open data streams,\nMetrics, logs, and traces flow,\nClearly see inside." 
+ } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -10.948673672146267 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 18, + "totalTokenCount": 663, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 18 + } + ], + "thoughtsTokenCount": 637 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-10T16:32:22.100847Z", + "responseId": "FjXpaO-TBoC4nvgPk77D6Ak" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Fri, 10 Oct 2025 16:32:25 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index b31eb7ab08..36802b65dc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -29,7 +29,9 @@ import os import subprocess import sys +import time +import fsspec import google.auth import google.auth.credentials import google.genai @@ -38,9 +40,19 @@ from google.genai import types from vcr.record_mode import RecordMode +from opentelemetry.instrumentation._semconv import ( + OTEL_SEMCONV_STABILITY_OPT_IN, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, +) from opentelemetry.instrumentation.google_genai import ( GoogleGenAiSdkInstrumentor, ) +from opentelemetry.util.genai.environment_variables 
import ( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, + OTEL_INSTRUMENTATION_GENAI_UPLOAD_BASE_PATH, +) from ..common.auth import FakeCredentials from ..common.otel_mocker import OTelMocker @@ -309,8 +321,25 @@ def fixture_instrumentor(): return GoogleGenAiSdkInstrumentor() +@pytest.fixture(name="enable_completion_hook") +def fixture_enable_completion_hook(request): + return getattr(request, "param", "default") + + +@pytest.fixture(name="semconv_version") +def fixture_semconv_version(request): + return getattr(request, "param", "default") + + @pytest.fixture(name="internal_instrumentation_setup", autouse=True) -def fixture_setup_instrumentation(instrumentor): +def fixture_setup_instrumentation(instrumentor, enable_completion_hook): + if enable_completion_hook == "enable_completion_hook": + os.environ.update( + { + OTEL_INSTRUMENTATION_GENAI_UPLOAD_BASE_PATH: "memory://", + "OTEL_INSTRUMENTATION_GENAI_COMPLETION_HOOK": "upload", + } + ) instrumentor.instrument() yield instrumentor.uninstrument() @@ -329,11 +358,30 @@ def fixture_otel_mocker(): autouse=True, params=["logcontent", "excludecontent"], ) -def fixture_setup_content_recording(request): +def fixture_setup_content_recording(request, semconv_version): enabled = request.param == "logcontent" - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str( - enabled - ) + # due to some init weirdness, this needs to be updated manually to work, and later restored, + # otherwise, state of this dict leaks to other tests and breaks them. 
+ orig_dict = _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING.copy() + if semconv_version == "experimental": + capture_content = "SPAN_AND_EVENT" if enabled else "NO_CONTENT" + os.environ.update( + { + OTEL_SEMCONV_STABILITY_OPT_IN: "gen_ai_latest_experimental", + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: capture_content, + } + ) + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING.update( + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + } + ) + else: + os.environ[OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT] = str( + enabled + ) + yield + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING = orig_dict @pytest.fixture(name="vcr_record_mode") @@ -490,6 +538,7 @@ async def _gather_all(): return _sync_impl +@pytest.mark.parametrize("semconv_version", ["default"], indirect=True) @pytest.mark.vcr def test_non_streaming(generate_content, model, otel_mocker): response = generate_content( @@ -501,6 +550,7 @@ def test_non_streaming(generate_content, model, otel_mocker): otel_mocker.assert_has_span_named(f"generate_content {model}") +@pytest.mark.parametrize("semconv_version", ["default"], indirect=True) @pytest.mark.vcr def test_streaming(generate_content_stream, model, otel_mocker): count = 0 @@ -513,3 +563,59 @@ def test_streaming(generate_content_stream, model, otel_mocker): count += 1 assert count > 0 otel_mocker.assert_has_span_named(f"generate_content {model}") + + +@pytest.mark.parametrize("semconv_version", ["experimental"], indirect=True) +@pytest.mark.parametrize( + "enable_completion_hook", ["enable_completion_hook"], indirect=True +) +@pytest.mark.vcr +def test_upload_hook_non_streaming( + generate_content, model, otel_mocker: OTelMocker +): + expected_input = [ + { + "parts": [ + { + "content": "Create a haiku about Open Telemetry.", + "type": "text", + } + ], + "role": "user", + } + ] + expected_output = [ + { + 
"role": "assistant", + "parts": [ + { + "content": "Open data streams,\nMetrics, logs, and traces flow,\nClearly see inside.", + "type": "text", + } + ], + "finish_reason": "stop", + } + ] + _ = generate_content( + model=model, contents="Create a haiku about Open Telemetry." + ) + time.sleep(2) + + event = otel_mocker.get_event_named( + "gen_ai.client.inference.operation.details" + ) + assert_fsspec_equal( + event.attributes["gen_ai.input.messages_ref"], expected_input + ) + + span = otel_mocker.get_span_named(f"generate_content {model}") + assert_fsspec_equal( + span.attributes["gen_ai.output.messages_ref"], expected_output + ) + + +def assert_fsspec_equal(path, value): + # Hide this function and its calls from traceback. + __tracebackhide__ = True # pylint: disable=unused-variable + with fsspec.open(path, "r") as file: + assert json.load(file) == value diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt index 3ee7b91536..7c7d511649 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt @@ -47,4 +47,4 @@ google-genai==1.32.0 # root directory, given invocation from "tox" at root level. 
-e opentelemetry-instrumentation -e instrumentation-genai/opentelemetry-instrumentation-google-genai --e util/opentelemetry-util-genai +-e util/opentelemetry-util-genai[upload] diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt index 2228363418..72e6bea128 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt @@ -29,4 +29,4 @@ opentelemetry-instrumentation==0.58b0 # Install locally from the folder. This path is relative to the # root directory, given invocation from "tox" at root level. -e instrumentation-genai/opentelemetry-instrumentation-google-genai --e util/opentelemetry-util-genai +-e util/opentelemetry-util-genai[upload]