Commit 1db9d5b

feat(tracing): reconstruct tracing module
1 parent 7cef667

16 files changed

Lines changed: 359 additions & 349 deletions

veadk/agent.py

Lines changed: 3 additions & 3 deletions
@@ -112,9 +112,9 @@ def model_post_init(self, __context: Any) -> None:
 
         self.tools.append(load_memory)
 
-        if self.tracers:
-            for tracer in self.tracers:
-                tracer.do_hooks(self)
+        # if self.tracers:
+        #     for tracer in self.tracers:
+        #         tracer.do_hooks(self)
 
         logger.info(f"{self.__class__.__name__} `{self.name}` init done.")
         logger.debug(

veadk/runner.py

Lines changed: 2 additions & 2 deletions
@@ -68,8 +68,8 @@ def __init__(
         # prevent VeRemoteAgent has no long-term memory attr
         if isinstance(self.agent, Agent):
             self.long_term_memory = self.agent.long_term_memory
-            for tracer in self.agent.tracers:
-                tracer.set_app_name(self.app_name)
+            # for tracer in self.agent.tracers:
+            #     tracer.set_app_name(self.app_name)
         else:
             self.long_term_memory = None
 

veadk/tracing/base_tracer.py

Lines changed: 4 additions & 201 deletions
@@ -12,16 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
 from abc import ABC, abstractmethod
-from typing import Any, Optional
+from typing import Optional
 
-from google.adk.agents.callback_context import CallbackContext
 from google.adk.agents.invocation_context import InvocationContext
-from google.adk.models.llm_request import LlmRequest
-from google.adk.models.llm_response import LlmResponse
 from google.adk.plugins.base_plugin import BasePlugin
-from google.adk.tools import BaseTool, ToolContext
 from google.genai import types
 from opentelemetry import trace
 
@@ -102,201 +97,9 @@ def replace_bytes_with_empty(data):
 
 class BaseTracer(ABC):
     def __init__(self, name: str):
-        self.app_name = "veadk_app_name"
-        pass
+        self.name = name
+        self._trace_id = "<unknown_trace_id>"
+        self._trace_file_path = "<unknown_trace_file_path>"
 
     @abstractmethod
     def dump(self, user_id: str, session_id: str, path: str = "/tmp") -> str: ...
-
-    def tracer_hook_before_model(
-        self, callback_context: CallbackContext, llm_request: LlmRequest
-    ) -> Optional[LlmResponse]:
-        """agent run stage"""
-        trace.get_tracer("gcp.vertex.agent")
-        span = trace.get_current_span()
-        # logger.debug(f"llm_request: {llm_request}")
-
-        req = llm_request.model_dump()
-
-        app_name = getattr(self, "app_name", "veadk_app")
-        agent_name = callback_context.agent_name
-        model_name = req.get("model", "unknown")
-        max_tokens = (
-            None
-            if not req.get("live_connect_config")
-            else req["live_connect_config"].get("max_output_tokens", None)
-        )
-        temperature = (
-            None
-            if not req.get("live_connect_config")
-            else req["live_connect_config"].get("temperature", None)
-        )
-        top_p = (
-            None
-            if not req.get("live_connect_config")
-            else req["live_connect_config"].get("top_p", None)
-        )
-
-        attributes = {}
-        attributes["agent.name"] = agent_name
-        attributes["app.name"] = app_name
-        attributes["gen_ai.system"] = "veadk"
-        if model_name:
-            attributes["gen_ai.request.model"] = model_name
-            attributes["gen_ai.response.model"] = (
-                model_name  # The req model and the resp model should be consistent.
-            )
-        attributes["gen_ai.request.type"] = "completion"
-        if max_tokens:
-            attributes["gen_ai.request.max_tokens"] = max_tokens
-        if temperature:
-            attributes["gen_ai.request.temperature"] = temperature
-        if top_p:
-            attributes["gen_ai.request.top_p"] = top_p
-
-        # Print attributes for debugging
-        # print("Tracing attributes:", attributes)
-
-        # Set all attributes at once if possible, else fallback to individual
-        if hasattr(span, "set_attributes"):
-            span.set_attributes(attributes)
-        else:
-            # Fallback for OpenTelemetry versions without set_attributes
-            for k, v in attributes.items():
-                span.set_attribute(k, v)
-
-    def tracer_hook_after_model(
-        self, callback_context: CallbackContext, llm_response: LlmResponse
-    ) -> Optional[LlmResponse]:
-        """call llm stage"""
-        trace.get_tracer("gcp.vertex.agent")
-        span = trace.get_current_span()
-        # logger.debug(f"llm_response: {llm_response}")
-        # logger.debug(f"callback_context: {callback_context}")
-
-        # Refined: collect all attributes, use set_attributes, print for debugging
-        attributes = {}
-
-        app_name = getattr(self, "app_name", "veadk_app")
-        agent_name = callback_context.agent_name
-        attributes["agent.name"] = agent_name
-        attributes["app.name"] = app_name
-        attributes["gen_ai.system"] = "veadk"
-        # prompt
-        user_content = callback_context.user_content
-        role = None
-        content = None
-        if getattr(user_content, "role", None):
-            role = getattr(user_content, "role", None)
-
-        if user_content and getattr(user_content, "parts", None):
-            # content = user_content.model_dump_json(exclude_none=True)
-            content = user_content.model_dump(exclude_none=True).get("parts", None)
-            if content:
-                content = replace_bytes_with_empty(content)
-            content = json.dumps(content, ensure_ascii=False) if content else None
-
-        if role and content:
-            attributes["gen_ai.prompt.0.role"] = role
-            attributes["gen_ai.prompt.0.content"] = content
-
-        # completion
-        completion_content = getattr(llm_response, "content").model_dump(
-            exclude_none=True
-        )
-        if completion_content:
-            content = json.dumps(
-                getattr(llm_response, "content").model_dump(exclude_none=True)["parts"]
-            )
-            role = getattr(llm_response, "content").model_dump(exclude_none=True)[
-                "role"
-            ]
-            if role and content:
-                attributes["gen_ai.completion.0.role"] = role
-                attributes["gen_ai.completion.0.content"] = content
-
-        if not llm_response.usage_metadata:
-            return
-
-        # tokens
-        metadata = llm_response.usage_metadata.model_dump()
-        if metadata:
-            prompt_tokens = metadata.get("prompt_token_count", None)
-            completion_tokens = metadata.get("candidates_token_count", None)
-            total_tokens = metadata.get("total_token_count", None)
-            cache_read_input_tokens = (
-                metadata.get("cache_read_input_tokens") or 0
-            )  # Might change, once openai introduces their equivalent.
-            cache_create_input_tokens = (
-                metadata.get("cache_create_input_tokens") or 0
-            )  # Might change, once openai introduces their equivalent.
-            if prompt_tokens:
-                attributes["gen_ai.usage.prompt_tokens"] = prompt_tokens
-            if completion_tokens:
-                attributes["gen_ai.usage.completion_tokens"] = completion_tokens
-            if total_tokens:
-                attributes["gen_ai.usage.total_tokens"] = total_tokens
-            if cache_read_input_tokens is not None:
-                attributes["gen_ai.usage.cache_read_input_tokens"] = (
-                    cache_read_input_tokens
-                )
-            if cache_create_input_tokens is not None:
-                attributes["gen_ai.usage.cache_create_input_tokens"] = (
-                    cache_create_input_tokens
-                )
-
-        # Print attributes for debugging
-        # print("Tracing attributes:", attributes)
-
-        # Set all attributes at once if possible, else fallback to individual
-        if hasattr(span, "set_attributes"):
-            span.set_attributes(attributes)
-        else:
-            # Fallback for OpenTelemetry versions without set_attributes
-            for k, v in attributes.items():
-                span.set_attribute(k, v)
-
-    def tracer_hook_after_tool(
-        self,
-        tool: BaseTool,
-        args: dict[str, Any],
-        tool_context: ToolContext,
-        tool_response: dict,
-    ):
-        trace.get_tracer("gcp.vertex.agent")
-        span = trace.get_current_span()
-        agent_name = tool_context.agent_name
-        tool_name = tool.name
-        app_name = getattr(self, "app_name", "veadk_app")
-        attributes = {
-            "agent.name": agent_name,
-            "app.name": app_name,
-            "tool.name": tool_name,
-            "gen_ai.system": "veadk",
-        }
-
-        # Set all attributes at once if possible, else fallback to individual
-        if hasattr(span, "set_attributes"):
-            span.set_attributes(attributes)
-        else:
-            # Fallback for OpenTelemetry versions without set_attributes
-            for k, v in attributes.items():
-                span.set_attribute(k, v)
-
-    def set_app_name(self, app_name):
-        self.app_name = app_name
-
-    def do_hooks(self, agent) -> None:
-        if not getattr(agent, "before_model_callback", None):
-            agent.before_model_callback = []
-        if not getattr(agent, "after_model_callback", None):
-            agent.after_model_callback = []
-        if not getattr(agent, "after_tool_callback", None):
-            agent.after_tool_callback = []
-
-        if self.tracer_hook_before_model not in agent.before_model_callback:
-            agent.before_model_callback.append(self.tracer_hook_before_model)
-        if self.tracer_hook_after_model not in agent.after_model_callback:
-            agent.after_model_callback.append(self.tracer_hook_after_model)
-        if self.tracer_hook_after_tool not in agent.after_tool_callback:
-            agent.after_tool_callback.append(self.tracer_hook_after_tool)
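
Note: after this commit, BaseTracer no longer carries any model/tool callbacks; it only records a tracer name plus placeholder trace id and trace file path, and leaves dump() abstract. A minimal sketch of a concrete tracer under the new base class (the class name, the span storage, and the file layout below are illustrative, not part of this commit):

# Hypothetical subclass, for illustration only: shows the surface the new
# BaseTracer leaves to implementers (a name plus an abstract `dump`).
import json
import os

from veadk.tracing.base_tracer import BaseTracer


class InMemoryTracer(BaseTracer):
    def __init__(self):
        super().__init__(name="inmemory_tracer")
        self._spans: list[dict] = []  # whatever span data the exporter collects

    def dump(self, user_id: str, session_id: str, path: str = "/tmp") -> str:
        # Write the collected spans to a per-session file and return its path,
        # mirroring the abstract signature declared in base_tracer.py.
        self._trace_file_path = os.path.join(path, f"{user_id}_{session_id}_trace.json")
        with open(self._trace_file_path, "w") as f:
            json.dump(self._spans, f, ensure_ascii=False)
        return self._trace_file_path
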
Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
+from veadk.tracing.telemetry.attributes.extractors.common_attributes_extractors import (
+    common_gen_ai_app_name,
+    common_gen_ai_session_id,
+    common_gen_ai_system,
+    common_gen_ai_system_version,
+    common_gen_ai_user_id,
+)
+from veadk.tracing.telemetry.attributes.extractors.llm_attributes_extrators import (
+    llm_gen_ai_completion,
+    llm_gen_ai_prompt,
+    llm_gen_ai_request_model,
+    llm_gen_ai_request_type,
+    llm_gen_ai_response_model,
+)
+
+ATTRIBUTES = {
+    "common": {
+        "gen_ai.system": common_gen_ai_system,
+        "gen_ai.system_version": common_gen_ai_system_version,
+        "gen_ai.app.name": common_gen_ai_app_name,
+        "gen_ai.user.id": common_gen_ai_user_id,
+        "gen_ai.session.id": common_gen_ai_session_id,
+    },
+    "llm": {
+        "gen_ai.request.model": llm_gen_ai_request_model,
+        "gen_ai.request.type": llm_gen_ai_request_type,
+        "gen_ai.response.model": llm_gen_ai_response_model,
+        "gen_ai.prompt": llm_gen_ai_prompt,
+        "gen_ai.completion": llm_gen_ai_completion,
+    },
+    "tool": ...,
+}
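
Note: the new ATTRIBUTES registry maps OpenTelemetry-style attribute keys to extractor callables (the "tool" entry is still a stub). How the registry is consumed is not shown in this commit; the sketch below is one plausible way to flatten it onto the current span. The import path and the apply_llm_attributes helper are assumptions, not part of the commit:

# Illustrative only: one way the ATTRIBUTES registry could be applied to the
# current span. The real wiring lives elsewhere in the tracing module.
import inspect

from opentelemetry import trace

from veadk.tracing.telemetry.attributes import ATTRIBUTES  # hypothetical module path


def apply_llm_attributes(params, **common_kwargs) -> None:
    span = trace.get_current_span()
    attributes = {}

    # Common extractors: some take no arguments, others read **kwargs
    # (app_name, user_id, session_id), so check the signature before calling.
    for key, extractor in ATTRIBUTES["common"].items():
        if inspect.signature(extractor).parameters:
            attributes[key] = extractor(**common_kwargs)
        else:
            attributes[key] = extractor()

    # LLM extractors take an LLMAttributesParams bundle. Prompt/completion
    # return per-index fragments like {".0.role": ..., ".0.content": ...}
    # that flatten into keys such as "gen_ai.prompt.0.role".
    for key, extractor in ATTRIBUTES["llm"].items():
        value = extractor(params)
        if isinstance(value, list):
            for fragment in value:
                for suffix, item in fragment.items():
                    attributes[f"{key}{suffix}"] = item
        elif value is not None:
            attributes[key] = value

    span.set_attributes(attributes)
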
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+from veadk.version import VERSION
+
+
+def common_gen_ai_system() -> str:
+    return "veadk"
+
+
+def common_gen_ai_system_version() -> str:
+    return VERSION
+
+
+def common_gen_ai_app_name(**kwargs) -> str:
+    return kwargs.get("app_name", "<unknown_app_name>")
+
+
+def common_gen_ai_user_id(**kwargs) -> str:
+    return kwargs.get("user_id", "")
+
+
+def common_gen_ai_session_id(**kwargs) -> str:
+    return kwargs.get("session_id", "<unknown_session_id>")
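
Note: the common extractors are pure functions; the system/version ones take no arguments, while the app/user/session ones read optional keyword arguments and fall back to placeholder strings. A quick illustration (the sample values are invented):

# Expected behaviour of the common extractors above.
from veadk.tracing.telemetry.attributes.extractors.common_attributes_extractors import (
    common_gen_ai_app_name,
    common_gen_ai_session_id,
    common_gen_ai_user_id,
)

print(common_gen_ai_app_name(app_name="weather_app"))  # "weather_app"
print(common_gen_ai_app_name())                        # "<unknown_app_name>"
print(common_gen_ai_user_id(user_id="u-123"))          # "u-123"
print(common_gen_ai_session_id())                      # "<unknown_session_id>"
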
Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
+from attr import dataclass
+from google.adk.agents.invocation_context import InvocationContext
+from google.adk.models.llm_request import LlmRequest
+from google.adk.models.llm_response import LlmResponse
+
+
+@dataclass
+class LLMAttributesParams:
+    invocation_context: InvocationContext
+    event_id: str
+    llm_request: LlmRequest
+    llm_response: LlmResponse
+
+
+def llm_gen_ai_request_model(params: LLMAttributesParams) -> str:
+    return params.llm_request.model or "<unknown_model_name>"
+
+
+def llm_gen_ai_request_type(params: LLMAttributesParams) -> str | list[str]:
+    type = "completion"
+    return type or "<unknown_type>"
+
+
+def llm_gen_ai_response_model(params: LLMAttributesParams) -> str:
+    return params.llm_request.model or "<unknown_model_name>"
+
+
+def llm_gen_ai_request_max_tokens(params: LLMAttributesParams) -> int | None:
+    return params.llm_request.config.max_output_tokens
+
+
+def llm_gen_ai_request_temperature(params: LLMAttributesParams) -> float | None:
+    return params.llm_request.config.temperature
+
+
+def llm_gen_ai_request_top_p(params: LLMAttributesParams) -> float | None:
+    return params.llm_request.config.top_p
+
+
+def llm_gen_ai_prompt(params: LLMAttributesParams) -> list[dict]:
+    ret = []
+    for idx, content in enumerate(params.llm_request.contents):
+        if content.parts:
+            role = content.role
+            parts = [part for part in content.parts if not part.inline_data]
+            ret.append({f".{idx}.role": role, f".{idx}.content": str(parts)})
+    return ret
+
+
+def llm_gen_ai_completion(params: LLMAttributesParams) -> list[dict] | None:
+    ret = []
+
+    content = params.llm_response.content
+    if content and content.parts:
+        parts = [part for part in content.parts if not part.inline_data]
+        ret.append({f".{0}.role": content.role, f".{0}.content": str(parts)})
+    return ret
+
+
+def llm_gen_ai_response_stop_reason(params: LLMAttributesParams) -> str | None:
+    return params.llm_response.stop_reason
+
+
+def llm_gen_ai_response_finish_reason(params: LLMAttributesParams) -> str | None:
+    return params.llm_response.finish_reason
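
Note: the LLM extractors read everything from an LLMAttributesParams bundle; the prompt/completion ones drop inline (binary) parts and stringify the rest. A sketch of exercising them with hand-built data, assuming the ADK pydantic models accept keyword construction as shown; the model name and message are invented, and invocation_context is not read by these extractors, so a placeholder is passed:

# Sketch only: exercises two extractors with hand-built request data.
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse
from google.genai import types

from veadk.tracing.telemetry.attributes.extractors.llm_attributes_extrators import (
    LLMAttributesParams,
    llm_gen_ai_prompt,
    llm_gen_ai_request_model,
)

params = LLMAttributesParams(
    invocation_context=None,  # placeholder: not read by these extractors
    event_id="evt-0",
    llm_request=LlmRequest(
        model="example-model",  # invented model name
        contents=[types.Content(role="user", parts=[types.Part(text="hi")])],
    ),
    llm_response=LlmResponse(),
)

print(llm_gen_ai_request_model(params))  # "example-model"
print(llm_gen_ai_prompt(params))
# e.g. [{".0.role": "user", ".0.content": "[Part(... text='hi')]"}]
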

veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py

Whitespace-only changes.
