Commit 5637151

fix demo
1 parent de306fa commit 5637151

8 files changed: 232 additions & 22 deletions

CHANGLOG.md

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+
+## [0.1.2] - 2025-04-07
+
+First release of the cozeloop SDK.
+It contains functionality for prompt_hub and trace.
+
+### Added
+
+- Initial cozeloop trace spec.
+- Initial cozeloop trace and prompt_hub SDK packages.
+- Example code for traces of large_text, multi_modality, parent_child, prompt, etc.
+- Example code for prompt_hub.
+- Exporter code for trace.
+- Project guidelines and other information in the form of a README and CONTRIBUTING.
+- MIT license.

cozeloop/integration/langchain/trace_callback.py

Lines changed: 11 additions & 4 deletions
@@ -16,27 +16,34 @@
 from langchain_core.prompts import AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
 from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
 
-from cozeloop import Span
+from cozeloop import Span, Client
 from cozeloop._client import get_default_client
 from cozeloop.integration.langchain.trace_model.llm_model import ModelTraceInput, ModelMeta, ModelTraceOutput, Message
 from cozeloop.integration.langchain.trace_model.prompt_template import PromptTraceOutput, Argument, PromptTraceInput
 from cozeloop.integration.langchain.trace_model.runtime import RuntimeInfo
 from cozeloop.integration.langchain.util import calc_token_usage, get_prompt_tag
 
+_trace_callback_client = None
 
 class LoopTracer:
     @classmethod
-    def get_callback_handler(cls):
+    def get_callback_handler(cls, client: Client = None):
         """
         Do not hold it for a long time, get a new callback_handler for each request.
         """
+        global _trace_callback_client
+        if client:
+            _trace_callback_client = client
+        else:
+            _trace_callback_client = get_default_client()
 
         return LoopTraceCallbackHandler()
 
 
 class LoopTraceCallbackHandler(BaseCallbackHandler):
     def __init__(self):
         super().__init__()
-        self._space_id = get_default_client().workspace_id()
+        self._space_id = _trace_callback_client.workspace_id
         self.run_map: Dict[str, Run] = {}
 
     def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> Any:
@@ -230,7 +237,7 @@ def _new_flow_span(self, span_name: str, span_type: str, **kwargs: Any) -> Span:
         if 'parent_run_id' in kwargs and kwargs['parent_run_id'] is not None:
             parent_span = self.run_map[str(kwargs['parent_run_id'])].span
         # new span
-        flow_span = get_default_client().start_span(span_name, span_type, child_of=parent_span)
+        flow_span = _trace_callback_client.start_span(span_name, span_type, child_of=parent_span)
         run_id = str(kwargs['run_id'])
         self.run_map[run_id] = Run(run_id, flow_span, span_type)
         # set default tags
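
With this change, a caller can hand LoopTracer an explicit Client instead of relying on the default one, and every span the handler creates is reported through that client. A minimal usage sketch, assuming the COZELOOP_* environment variables are already set:

from cozeloop import new_client
from cozeloop.integration.langchain.trace_callback import LoopTracer

client = new_client()
# Get a fresh handler per request; omitting `client` falls back to get_default_client().
handler = LoopTracer.get_callback_handler(client)

Note that the client is kept in the module-level _trace_callback_client, so the most recently passed client is the one used by all handlers created afterwards.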

cozeloop/spec/tracespce/span_value.py

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@
 # SPDX-License-Identifier: MIT
 
 # SpanType tag builtin values
+V_PROMPT_HUB_SPAN_TYPE = "prompt"
 V_PROMPT_SPAN_TYPE = "prompt"
 V_MODEL_SPAN_TYPE = "model"
 V_RETRIEVER_SPAN_TYPE = "retriever"
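
A short sketch of how the new constant is used (the new examples/trace/prompt.py below does the same); note that V_PROMPT_HUB_SPAN_TYPE and V_PROMPT_SPAN_TYPE currently share the value "prompt":

from cozeloop import new_client
from cozeloop.spec.tracespce import V_PROMPT_HUB_SPAN_TYPE

client = new_client()
# Marks the span as a prompt(-hub) span on the Loop platform.
span = client.start_span("get_prompt", V_PROMPT_HUB_SPAN_TYPE)
span.finish()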

examples/lcel/lcel.py

Lines changed: 11 additions & 10 deletions
@@ -3,34 +3,34 @@
 
 import logging
 import os
+import time
 
 from langchain.callbacks.tracers import ConsoleCallbackHandler
 from langchain_core.runnables import RunnableConfig
 from langchain_openai import AzureChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 
-from cozeloop import set_log_level
+from cozeloop import set_log_level, new_client
 from cozeloop.integration.langchain.trace_callback import LoopTracer
 
 logger = logging.getLogger(__name__)
 
 def do_lcel_demo():
     # Configure the parameters for the large model. The keys in os.environ are standard keys for Langchain and must be
     # followed. This is just a demo, and the connectivity of the large model needs to be ensured by the user.
-    os.environ['AZURE_OPENAI_API_KEY'] = 'xxx'  # need to set an llm api key
-    os.environ['OPENAI_API_VERSION'] = '2024-05-13'  # llm version, see more: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
-    os.environ['AZURE_OPENAI_ENDPOINT'] = 'https://xxx'  # llm endpoint
-    os.environ['AUZURE_DEPLOYMENT'] = 'gpt-4o-2024-05-13'
+    # os.environ['AZURE_OPENAI_API_KEY'] = 'xxx'  # need to set an llm api key
+    # os.environ['OPENAI_API_VERSION'] = '2024-05-13'  # llm version, see more: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
+    # os.environ['AZURE_OPENAI_ENDPOINT'] = 'https://xxx'  # llm endpoint
+    # os.environ['AUZURE_DEPLOYMENT'] = 'gpt-4o-2024-05-13'
 
     # Configure the Loop environment variables. This is just a demo, and the keys in os.environ are not for reference.
     # The specific implementation method is determined by the business side.
     # Set the following environment variables first (assuming you are using a PAT token):
-    # COZELOOP_WORKSPACE_ID=your workspace id
-    # COZELOOP_API_TOKEN=your token
-    os.environ['COZELOOP_API_TOKEN'] = 'your token'
-    os.environ['COZELOOP_WORKSPACE_ID'] = 'your workspace'
+    # os.environ['COZELOOP_API_TOKEN'] = 'your token'
+    # os.environ['COZELOOP_WORKSPACE_ID'] = 'your workspace id'
 
-    trace_callback_handler = LoopTracer.get_callback_handler()
+    client = new_client(ultra_large_report=True)
+    trace_callback_handler = LoopTracer.get_callback_handler(client)
     # init llm model
     llm_model = AzureChatOpenAI(azure_deployment=os.environ['AUZURE_DEPLOYMENT'])
@@ -40,6 +40,7 @@ def do_lcel_demo():
         input='Using the skills you have learned, help me generate a few interesting questions',
         config=RunnableConfig(callbacks=[ConsoleCallbackHandler(), trace_callback_handler])
     )
+    time.sleep(5)  # async report, so sleep to wait for the report to finish
     print('\n====== model output start ======\n' + output + '\n====== model output finish ======\n')
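
The added sleep is needed because spans are reported asynchronously in batches, so exiting immediately could drop the trace. A sketch of the alternative that the prompt examples use, assuming the client created above:

# Instead of a fixed sleep, force-report all spans still in the queue.
# client.flush() blocks until the report completes, so use it sparingly.
client.flush()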

examples/lcel/lcel_stream.py

Lines changed: 10 additions & 6 deletions
@@ -3,6 +3,7 @@
 
 import logging
 import os
+import time
 
 from langchain.callbacks.tracers import ConsoleCallbackHandler
 from langchain_core.runnables import RunnableConfig
@@ -17,19 +18,19 @@
 def do_lcel_stream_demo():
     # Configure the parameters for the llm. The keys in os.environ are standard keys for Langchain and must be
     # followed. This is just a demo, and the connectivity of the llm needs to be ensured by the user.
-    os.environ['AZURE_OPENAI_API_KEY'] = 'xxx'  # need to set an llm api key
-    os.environ['OPENAI_API_VERSION'] = '2024-05-13'  # llm version, see more: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
-    os.environ['AZURE_OPENAI_ENDPOINT'] = 'https://xxx'  # llm endpoint
-    os.environ['AUZURE_DEPLOYMENT'] = 'gpt-4o-2024-05-13'
+    # os.environ['AZURE_OPENAI_API_KEY'] = 'xxx'  # need to set an llm api key
+    # os.environ['OPENAI_API_VERSION'] = '2024-05-13'  # llm version, see more: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
+    # os.environ['AZURE_OPENAI_ENDPOINT'] = 'https://xxx'  # llm endpoint
+    # os.environ['AUZURE_DEPLOYMENT'] = 'gpt-4o-2024-05-13'
 
     # Configure the Loop environment variables. This is just a demo, and the keys in os.environ are not for reference.
     # The specific implementation method is determined by the business side.
 
     # Set the following environment variables first (assuming you are using a PAT token):
     # COZELOOP_WORKSPACE_ID=your workspace id
     # COZELOOP_API_TOKEN=your token
-    os.environ['COZELOOP_API_TOKEN'] = 'your token'
-    os.environ['COZELOOP_WORKSPACE_ID'] = 'your workspace'
+    # os.environ['COZELOOP_API_TOKEN'] = 'your token'
+    # os.environ['COZELOOP_WORKSPACE_ID'] = 'your workspace'
 
     trace_callback_handler = LoopTracer.get_callback_handler()
     # init llm model
@@ -45,6 +46,9 @@ def do_lcel_stream_demo():
         chunks.append(chunk)
         print(chunk, end='', flush=True)
 
+    time.sleep(5)  # async report, so sleep to wait for the report to finish
+    print('\n====== model output start ======\n' + ''.join(chunks) + '\n====== model output finish ======\n')
+
 
 if __name__ == "__main__":
     set_log_level(logging.INFO)
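
Unlike lcel.py, this demo still calls LoopTracer.get_callback_handler() with no argument, so the handler falls back to the default client built from the environment. A sketch of that default path, assuming a PAT token:

import os
from cozeloop.integration.langchain.trace_callback import LoopTracer

# assumed env configuration, matching the comments in the demo above
os.environ['COZELOOP_WORKSPACE_ID'] = 'your workspace id'
os.environ['COZELOOP_API_TOKEN'] = 'your token'
handler = LoopTracer.get_callback_handler()  # uses get_default_client()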

examples/prompt/prompt_hub.py

Lines changed: 22 additions & 1 deletion
@@ -3,10 +3,12 @@
 
 import json
 import time
+from typing import List
 
 import cozeloop
 from cozeloop import Message
 from cozeloop.entities.prompt import Role
+from cozeloop.spec.tracespce import CALL_OPTIONS, ModelCallOption, ModelMessage, ModelInput
 
 
 class LLMRunner:
@@ -34,7 +36,7 @@ def llm_call(self, input_data):
             output_token = 1211
 
             # set tag key: `input`
-            span.set_input(input_data)
+            span.set_input(convert_model_input(input_data))
             # set tag key: `output`
             span.set_output(output)
             # set tag key: `model_provider`, e.g., openai, etc.
@@ -52,6 +54,14 @@ def llm_call(self, input_data):
             span.set_output_tokens(output_token)
             # set tag key: `model_name`, e.g., gpt-4-1106-preview, etc.
             span.set_model_name("gpt-4-1106-preview")
+            span.set_tags(CALL_OPTIONS, ModelCallOption(
+                temperature=0.5,
+                top_p=0.5,
+                top_k=10,
+                presence_penalty=0.5,
+                frequency_penalty=0.5,
+                max_tokens=1024,
+            ))
 
             return None
         except Exception as e:
@@ -121,3 +131,14 @@ def llm_call(self, input_data):
     client.flush()
 
 
+def convert_model_input(messages: List[Message]) -> ModelInput:
+    model_messages = []
+    for message in messages:
+        model_messages.append(ModelMessage(
+            role=str(message.role),
+            content=message.content if message.content is not None else ""
+        ))
+
+    return ModelInput(
+        messages=model_messages
+    )
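
The new convert_model_input helper normalizes raw prompt messages into the ModelInput spec before they are attached as the span input, stringifying roles and replacing missing content with "". A minimal sketch of calling it directly, with messages shaped as in the other examples:

from cozeloop import Message
from cozeloop.entities.prompt import Role

messages = [
    Message(role=Role.SYSTEM, content="You are a helpful bot."),
    Message(role=Role.USER, content="Hello!"),
]
model_input = convert_model_input(messages)  # ModelInput(messages=[ModelMessage(...), ...])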

examples/trace/prompt.py

Lines changed: 161 additions & 0 deletions
@@ -0,0 +1,161 @@
+# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
+# SPDX-License-Identifier: MIT
+import logging
+from typing import Optional, List, Dict, Any
+import json
+
+from cozeloop.internal.consts import TRACE_PROMPT_HUB_SPAN_NAME, TRACE_PROMPT_TEMPLATE_SPAN_NAME
+from cozeloop.entities.prompt import PromptTemplate, Prompt, Message, Role
+from cozeloop import new_client, set_log_level
+from cozeloop.spec.tracespce import ModelMessage, PromptInput, PromptArgument, V_PROMPT_HUB_SPAN_TYPE, PROMPT_KEY, \
+    INPUT, PROMPT_VERSION, OUTPUT, V_PROMPT_SPAN_TYPE
+
+
+class LLMRunner:
+    def __init__(self, client):
+        self.client = client
+
+
+ERR_CODE_INTERNAL = 600789111
+
+
+def main():
+    # Set the following environment variables first (assuming you are using a PAT token):
+    # COZELOOP_WORKSPACE_ID=your workspace id
+    # COZELOOP_API_TOKEN=your token
+
+    set_log_level(logging.INFO)
+    client = new_client()
+
+    get_prompt_runner = GetPromptRunner(client=client)
+
+    # 1. start span
+    span = client.start_span("root_span", "main_span")
+
+    # 2. set span tags and baggage
+    span.set_tags({
+        "mode": "simple",
+        "node_id": 6076665,
+        "node_process_duration": 228.6
+    })
+
+    span.set_baggage({
+        "product_id": "123456654321"
+    })
+    span.set_user_id_baggage("123456")
+
+    # assuming we call an llm here: fetch the prompt, then format it
+    try:
+        prompt = get_prompt_runner.get_prompt()
+        res_prompt = get_prompt_runner.format_prompt(prompt, {"var1": "What skills do you have?"})
+    except Exception as e:
+        span.set_status_code(ERR_CODE_INTERNAL)
+        span.set_error(str(e))
+
+    # 3. finish span
+    span.finish()
+
+    # 4. (optional) flush or close
+    # -- force flush, report all traces in the queue
+    # Warning! In general, this method does not need to be called, as spans are automatically reported in batches.
+    # Note that flush blocks until the report completes, and it may cause frequent reporting,
+    # affecting performance.
+    client.flush()
+
+
+def get_prompt() -> Optional[Prompt]:
+    return Prompt(
+        prompt_template=PromptTemplate(
+            messages=[
+                Message(role=Role.SYSTEM, content="Hello!"),
+                Message(role=Role.USER, content="Hello! {{var1}}")
+            ]
+        )
+    )
+
+
+def do_prompt_format() -> List[Message]:
+    return []  # mock
+
+
+def to_span_prompt_input(messages: List[Message], arguments: Dict[str, Any]) -> PromptInput:
+    return PromptInput(
+        templates=to_span_messages(messages),
+        arguments=to_span_arguments(arguments)
+    )
+
+
+def to_span_arguments(arguments: Dict[str, Any]) -> List[PromptArgument]:
+    return [
+        PromptArgument(key=key, value=value)
+        for key, value in arguments.items()
+    ]
+
+
+def to_span_messages(messages: List[Message]) -> List[ModelMessage]:
+    return [to_span_message(msg) for msg in messages]
+
+
+def to_span_message(message: Optional[Message]) -> Optional[ModelMessage]:
+    if not message:
+        return None
+    return ModelMessage(
+        role=str(message.role),
+        content=message.content if message.content else ""
+    )
+
+
+class GetPromptRunner:
+    def __init__(self, client):
+        self.client = client
+
+    def get_prompt(self) -> Optional[Prompt]:
+        span = self.client.start_span("get_prompt", V_PROMPT_HUB_SPAN_TYPE)
+        try:
+            prompt_hub_span = self.client.start_span(TRACE_PROMPT_HUB_SPAN_NAME,
+                                                     V_PROMPT_HUB_SPAN_TYPE)
+            try:
+                prompt = get_prompt()
+
+                if prompt_hub_span:
+                    prompt_hub_span.set_tags({
+                        PROMPT_KEY: "test_demo",
+                        INPUT: json.dumps({
+                            PROMPT_KEY: "test_demo",
+                            PROMPT_VERSION: "v1.0.1"
+                        }),
+                        PROMPT_VERSION: "v1.0.1",  # mock version
+                        OUTPUT: prompt
+                    })
+
+                return prompt
+            finally:
+                if prompt_hub_span:
+                    prompt_hub_span.finish()
+        finally:
+            span.finish()
+
+    def format_prompt(self, prompt: Prompt, variables: Dict[str, Any]) -> List[Message]:
+        span = self.client.start_span("format_prompt", V_PROMPT_SPAN_TYPE)
+        try:
+            prompt_template_span = self.client.start_span(TRACE_PROMPT_TEMPLATE_SPAN_NAME,
+                                                          V_PROMPT_SPAN_TYPE)
+            try:
+                messages = do_prompt_format()
+
+                if prompt_template_span:
+                    prompt_template_span.set_tags({
+                        PROMPT_KEY: "test_demo",
+                        PROMPT_VERSION: "v1.0.1",
+                        INPUT: json.dumps(to_span_prompt_input(prompt.prompt_template.messages, variables)),
+                        OUTPUT: json.dumps(to_span_messages(messages))
+                    })
+
+                return messages
+            except Exception as e:
+                if prompt_template_span:
+                    prompt_template_span.set_error(str(e))
+                raise
+            finally:
+                if prompt_template_span:
+                    prompt_template_span.finish()
+        finally:
+            span.finish()
+
+
+if __name__ == "__main__":
+    main()

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cozeloop"
-version = "0.1.0"
+version = "0.1.2"
 description = "coze loop sdk"
 authors = ["JiangQi715 <jiangqi.rrt@bytedance.com>"]
 license = "MIT"
