Skip to content

Commit 5eb8f2a

Browse files
committed
feat: add tool metrics
1 parent 52091c5 commit 5eb8f2a

File tree

7 files changed

+55
-4823
lines changed

7 files changed

+55
-4823
lines changed

agent.py

Lines changed: 0 additions & 51 deletions
This file was deleted.

deep_eval_test.py

Lines changed: 0 additions & 21 deletions
This file was deleted.

uv.lock

Lines changed: 0 additions & 4692 deletions
This file was deleted.

veadk/agent.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,9 @@ def model_post_init(self, __context: Any) -> None:
140140

141141
if self.long_term_memory is not None:
142142
from google.adk.tools import load_memory
143-
143+
if not load_memory.custom_metadata:
144+
load_memory.custom_metadata = {}
145+
load_memory.custom_metadata["backend"] = self.long_term_memory.backend
144146
self.tools.append(load_memory)
145147

146148
logger.info(f"VeADK version: {VERSION}")

veadk/consts.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@
4242
}
4343

4444
DEFAULT_APMPLUS_OTEL_EXPORTER_ENDPOINT = "http://apmplus-cn-beijing.volces.com:4317"
45-
DEFAULT_APMPLUS_OTEL_EXPORTER_SERVICE_NAME = "veadk_tracing_demo"
45+
DEFAULT_APMPLUS_OTEL_EXPORTER_SERVICE_NAME = "veadk_tracing"
4646

4747
DEFAULT_COZELOOP_OTEL_EXPORTER_ENDPOINT = (
4848
"https://api.coze.cn/v1/loop/opentelemetry/v1/traces"

veadk/tools/load_knowledgebase_tool.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@
2525

2626
from veadk.knowledgebase import KnowledgeBase
2727
from veadk.knowledgebase.entry import KnowledgebaseEntry
28+
from veadk.utils.logger import get_logger
29+
30+
logger = get_logger(__name__)
2831

2932
if TYPE_CHECKING:
3033
from google.adk.models.llm_request import LlmRequest
@@ -96,6 +99,15 @@ class LoadKnowledgebaseTool(FunctionTool):
9699

97100
def __init__(self):
98101
super().__init__(load_knowledgebase)
102+
global knowledgebase
103+
if knowledgebase is None:
104+
logger.info(
105+
"Failed to get global knowledgebase instance; cannot set knowledgebase tool backend."
106+
)
107+
else:
108+
if not self.custom_metadata:
109+
self.custom_metadata = {}
110+
self.custom_metadata["backend"] = knowledgebase.backend
99111

100112
@override
101113
def _get_declaration(self) -> types.FunctionDeclaration | None:

veadk/tracing/telemetry/exporters/apmplus_exporter.py

Lines changed: 39 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,11 @@ class Meters:
127127
"gen_ai.chat_completions.streaming_time_per_output_token"
128128
)
129129

130+
# apmplus metrics
131+
# span duration
132+
APMPLUS_SPAN_LATENCY = "apmplus_span_latency"
133+
# tool token usage
134+
APMPLUS_TOOL_TOKEN_USAGE = "apmplus_tool_token_usage"
130135

131136
class MeterUploader:
132137
def __init__(
@@ -289,21 +294,6 @@ def record_call_llm(
289294
span = trace.get_current_span()
290295
if not span:
291296
return
292-
# operation = "tool"
293-
# operation_type = "load_knowledgebase"
294-
# # 能否获取到 backend 信息?
295-
# operation_backend = "knowledgebase"
296-
# if random.randint(0, 1) == 0:
297-
# operation_type = "load_memory"
298-
# operation_backend = "mem0"
299-
# span_name_attributes = {**attributes,
300-
# # "name": span_name,
301-
# "gen_ai_kind": "tool",
302-
# "gen_ai_operation_name": operation,
303-
# "gen_ai_operation_type": operation_type,
304-
# "gen_ai_operation_backend": operation_backend,
305-
306-
# }
307297

308298
# record span latency
309299
if hasattr(span, "start_time") and self.apmplus_span_latency:
@@ -313,62 +303,54 @@ def record_call_llm(
313303
duration, attributes=attributes
314304
)
315305

316-
# # TODO: trigger with tool record
317-
# if self.apmplus_tool_token_usage:
318-
# tool_token_usage = 122 # tool token 数量,使用文本长度/4
319-
# # TODO: 设置 token_type: input, output
320-
# tool_token_attributes = {**span_name_attributes, "token_type": "input"}
321-
#
322-
# self.apmplus_tool_token_usage.record(
323-
# tool_token_usage, attributes=tool_token_attributes
324-
# )
325-
def record_tool(self):
326-
# TODO: trigger with tool record
327-
attributes = {
328-
"gen_ai_system": "volcengine",
329-
} # required by Volcengine APMPlus
330-
# add span name attribute
306+
def record_tool_call(
307+
self,
308+
tool: BaseTool,
309+
args: dict[str, Any],
310+
function_response_event: Event,
311+
):
312+
logger.debug(f"Record tool call work in progress. Tool: {tool.name}")
331313
span = trace.get_current_span()
332314
if not span:
333315
return
334316
operation = "tool"
335-
operation_type = "load_knowledgebase"
336-
# 能否获取到 backend 信息?
337-
operation_backend = "knowledgebase"
338-
if random.randint(0, 1) == 0:
339-
operation_type = "load_memory"
340-
operation_backend = "mem0"
341-
span_name_attributes = {**attributes,
342-
# "name": span_name,
343-
"gen_ai_kind": "tool",
344-
"gen_ai_operation_name": operation,
345-
"gen_ai_operation_type": operation_type,
346-
"gen_ai_operation_backend": operation_backend,
347-
348-
}
317+
operation_type = tool.name
318+
operation_backend = ""
319+
if tool.custom_metadata:
320+
operation_backend = tool.custom_metadata.get("backend", "")
321+
# if random.randint(0, 1) == 0:
322+
# operation_type = "load_memory"
323+
# operation_backend = "mem0"
324+
attributes = {
325+
"gen_ai_operation_name": operation,
326+
"gen_ai_operation_type": operation_type,
327+
"gen_ai_operation_backend": operation_backend,
328+
}
349329
if hasattr(span, "start_time") and self.apmplus_span_latency:
350330
# span 耗时
351331
duration = (time.time_ns() - span.start_time) / 1e9 # type: ignore
352332
self.apmplus_span_latency.record(
353-
duration, attributes=span_name_attributes
333+
duration, attributes=attributes
354334
)
355-
if self.apmplus_tool_token_usage:
356-
tool_token_usage = 122 # tool token 数量,使用文本长度/4
357-
# TODO: 设置 token_type: input, output
358-
tool_token_attributes = {**span_name_attributes, "token_type": "input"}
359335

336+
if self.apmplus_tool_token_usage and hasattr(span, "attributes"):
337+
tool_input = span.attributes["gen_ai.tool.input"]
338+
tool_token_usage = len(tool_input) / 4  # estimate tool token count as text length / 4
339+
input_tool_token_attributes = {**attributes, "token_type": "input"}
360340
self.apmplus_tool_token_usage.record(
361-
tool_token_usage, attributes=tool_token_attributes
341+
tool_token_usage, attributes=input_tool_token_attributes
362342
)
363343

344+
tool_output = span.attributes["gen_ai.tool.output"]
345+
tool_token_usage = len(tool_output) / 4  # estimate tool token count as text length / 4
346+
output_tool_token_attributes = {**attributes, "token_type": "output"}
347+
self.apmplus_tool_token_usage.record(
348+
tool_token_usage, attributes=output_tool_token_attributes
349+
)
350+
351+
352+
364353

365-
def record_tool_call(
366-
self,
367-
tool: BaseTool,
368-
args: dict[str, Any],
369-
function_response_event: Event,
370-
):
371-
logger.debug(f"Record tool call work in progress. Tool: {tool.name}")
372354

373355
class APMPlusExporterConfig(BaseModel):
374356
endpoint: str = Field(

0 commit comments

Comments
 (0)