1+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2+ # SPDX-License-Identifier: MIT
3+ import logging
4+ from typing import Optional , List , Dict , Any
5+ import json
6+
7+ from cozeloop .internal .consts import TRACE_PROMPT_HUB_SPAN_NAME , TRACE_PROMPT_TEMPLATE_SPAN_NAME
8+ from cozeloop .entities .prompt import PromptTemplate , Prompt , Message , Role
9+ from cozeloop import new_client , set_log_level
10+ from cozeloop .spec .tracespce import ModelMessage , PromptInput , PromptArgument , V_PROMPT_HUB_SPAN_TYPE , PROMPT_KEY , \
11+ INPUT , PROMPT_VERSION , OUTPUT , V_PROMPT_SPAN_TYPE
12+
13+
class LLMRunner:
    """Thin holder for a loop client, kept as an extension point for LLM calls."""

    def __init__(self, client):
        # Loop client used to create and report trace spans.
        self.client = client
17+
18+
19+ ERR_CODE_INTERNAL = 600789111
20+
21+
def main():
    """Demo entry point: create a client, trace a prompt fetch + format flow.

    Set the following environment variables first (assuming a PAT token):
      COZELOOP_WORKSPACE_ID=your workspace id
      COZELOOP_API_TOKEN=your token
    """
    set_log_level(logging.INFO)
    client = new_client()

    get_prompt_runner = GetPromptRunner(client=client)

    # 1. Start the root span for this trace.
    span = client.start_span("root_span", "main_span")

    # 2. Set span tags and baggage (baggage propagates to child spans).
    span.set_tags({
        "mode": "simple",
        "node_id": 6076665,
        "node_process_duration": 228.6,
    })

    span.set_baggage({
        "product_id": "123456654321",
    })
    span.set_user_id_baggage("123456")

    # 3. Assuming we call an LLM here.
    # BUG FIX: GetPromptRunner.get_prompt() returns a single Optional[Prompt];
    # the original `prompt, err = ...` tried to unpack it into two names and
    # would fail at runtime. Report failures on the span via exception handling
    # instead.
    try:
        prompt = get_prompt_runner.get_prompt()
        get_prompt_runner.format_prompt(prompt, {"var1": "你会什么技能"})
    except Exception as err:  # demo-wide catch: annotate the span with the error
        span.set_status_code(ERR_CODE_INTERNAL)
        span.set_error(str(err))

    # 4. Finish the span.
    span.finish()

    # 5. (Optional) flush or close.
    # -- force flush, report all traces in the queue.
    # Warning! In general, this method is not needed to be called, as spans will
    # be automatically reported in batches. Note that flush will block and wait
    # for the report to complete, and it may cause frequent reporting, affecting
    # performance.
    client.flush()
64+
65+
def get_prompt() -> Optional[Prompt]:
    """Mock PromptHub fetch: return a fixed two-message prompt template.

    BUG FIX: the original return annotation was ``[Optional[Prompt]]`` — a list
    literal containing a type, which is not a valid annotation for a function
    that returns a single Prompt.

    Returns:
        A Prompt whose template contains a system greeting and a user message
        with a ``{{var1}}`` placeholder for later formatting.
    """
    return Prompt(
        prompt_template=PromptTemplate(
            messages=[
                Message(role=Role.SYSTEM, content="Hello!"),
                Message(role=Role.USER, content="Hello! {{var1}}"),
            ]
        )
    )
75+
76+
def do_prompt_format() -> List[Message]:
    """Stand-in for real template rendering; produces no messages (mock)."""
    rendered: List[Message] = []
    return rendered
79+
80+
def to_span_prompt_input(messages: List[Message], arguments: Dict[str, Any]) -> PromptInput:
    """Bundle template messages and variable values into a span PromptInput."""
    span_templates = to_span_messages(messages)
    span_arguments = to_span_arguments(arguments)
    return PromptInput(templates=span_templates, arguments=span_arguments)
86+
87+
def to_span_arguments(arguments: Dict[str, Any]) -> List[PromptArgument]:
    """Convert a variable mapping into span-reportable PromptArgument entries."""
    return [PromptArgument(key=name, value=val) for name, val in arguments.items()]
93+
94+
def to_span_messages(messages: List[Message]) -> List[ModelMessage]:
    """Map each entity Message onto its span ModelMessage counterpart."""
    return list(map(to_span_message, messages))
97+
98+
def to_span_message(message: Optional[Message]) -> Optional[ModelMessage]:
    """Translate an entity Message into a span ModelMessage; None-safe."""
    if not message:
        return None
    # Empty/None content is normalized to an empty string for reporting.
    role_text = str(message.role)
    content_text = message.content or ""
    return ModelMessage(role=role_text, content=content_text)
106+
107+
class GetPromptRunner:
    """Demo runner that fetches and formats a prompt, reporting each step as spans."""

    def __init__(self, client):
        # Loop client used to create trace spans.
        self.client = client

    def get_prompt(self) -> Optional[Prompt]:
        """Fetch the (mock) prompt, wrapped in PromptHub trace spans.

        Returns:
            The fetched Prompt (may be None if the source has none).
        """
        span = self.client.start_span("get_prompt", V_PROMPT_HUB_SPAN_TYPE, None)
        try:
            prompt_hub_span = self.client.start_span(TRACE_PROMPT_HUB_SPAN_NAME,
                                                     V_PROMPT_HUB_SPAN_TYPE)
            try:
                prompt = get_prompt()

                if prompt_hub_span:
                    prompt_hub_span.set_tags({
                        PROMPT_KEY: "test_demo",
                        INPUT: json.dumps({
                            PROMPT_KEY: "test_demo",
                            PROMPT_VERSION: "v1.0.1",
                        }),
                        PROMPT_VERSION: "v1.0.1",  # mock version
                        OUTPUT: prompt,
                    })

                return prompt
            finally:
                # Always close the child span, even when get_prompt() raises.
                if prompt_hub_span:
                    prompt_hub_span.finish()
        finally:
            span.finish()

    def format_prompt(self, prompt: Prompt, variables: Dict[str, Any]) -> List[Message]:
        """Render *prompt* with *variables*, wrapped in prompt-template spans.

        BUG FIX: do_prompt_format() returns a plain list; the original
        ``messages, err = do_prompt_format()`` tried to unpack that list into
        two names and raised ValueError at runtime, and the method returned a
        ``(messages, err)`` tuple despite being annotated ``-> List[Message]``.
        Errors are now surfaced as exceptions (recorded on the span) and the
        message list alone is returned, matching the annotation.

        Args:
            prompt: Prompt whose template messages are reported as span input.
            variables: Template variable values keyed by placeholder name.

        Returns:
            The formatted messages (empty in this mock).
        """
        span = self.client.start_span("format_prompt", V_PROMPT_SPAN_TYPE, None)
        try:
            prompt_template_span = self.client.start_span(TRACE_PROMPT_TEMPLATE_SPAN_NAME,
                                                          V_PROMPT_SPAN_TYPE)
            try:
                try:
                    messages = do_prompt_format()
                except Exception as err:
                    # Record the failure on the span, then let callers see it.
                    if prompt_template_span:
                        prompt_template_span.set_error(str(err))
                    raise

                if prompt_template_span:
                    prompt_template_span.set_tags({
                        PROMPT_KEY: "test_demo",
                        PROMPT_VERSION: "v1.0.1",
                        INPUT: json.dumps(to_span_prompt_input(prompt.prompt_template.messages, variables)),
                        OUTPUT: json.dumps(to_span_messages(messages)),
                    })

                return messages
            finally:
                # Always close the child span, even on error.
                if prompt_template_span:
                    prompt_template_span.finish()
        finally:
            span.finish()