 1      -   from contextvars import ContextVar
 2      1   import os
 3      2   import typing
 4      3   from typing import List, Optional, Sequence
10      9   from opentelemetry.trace import Tracer
11     10
12     11   from humanloop.core.client_wrapper import SyncClientWrapper
       12+  from humanloop.eval_utils.run import prompt_call_evaluation_aware
13     13   from humanloop.utilities.types import DecoratorPromptKernelRequestParams
14      -   from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext
15     14
16     15   from humanloop.eval_utils import log_with_evaluation_context, run_eval
17     16   from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
@@ -38,10 +37,8 @@ def __init__(
38     37           self,
39     38           *,
40     39           client_wrapper: SyncClientWrapper,
41      -           evaluation_context_variable: ContextVar[Optional[EvaluationContext]],
42     40       ):
43     41           super().__init__(client_wrapper=client_wrapper)
44      -           self._evaluation_context_variable = evaluation_context_variable
45     42
46     43       def run(
47     44           self,
@@ -70,7 +67,6 @@ def run(
70     67               dataset=dataset,
71     68               evaluators=evaluators,
72     69               workers=workers,
73      -               evaluation_context_variable=self._evaluation_context_variable,
74     70           )
75     71
76     72
@@ -118,31 +114,15 @@ def __init__(
118   114               httpx_client=httpx_client,
119   115           )
120   116
121     -           self.evaluation_context_variable: ContextVar[Optional[EvaluationContext]] = ContextVar(
122     -               EVALUATION_CONTEXT_VARIABLE_NAME
123     -           )
124     -
125     -           eval_client = ExtendedEvalsClient(
126     -               client_wrapper=self._client_wrapper,
127     -               evaluation_context_variable=self.evaluation_context_variable,
128     -           )
      117+          eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
129   118           eval_client.client = self
130   119           self.evaluations = eval_client
131   120           self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
132   121
133   122           # Overload the .log method of the clients to be aware of Evaluation Context
134     -           # TODO: Overload the log for Evaluators and Tools once run_id is added
135     -           # to them.
136     -           self.prompts = log_with_evaluation_context(
137     -               client=self.prompts,
138     -               evaluation_context_variable=self.evaluation_context_variable,
139     -           )
140     -           # self.evaluators = log_with_evaluation_context(client=self.evaluators)
141     -           # self.tools = log_with_evaluation_context(client=self.tools)
142     -           self.flows = log_with_evaluation_context(
143     -               client=self.flows,
144     -               evaluation_context_variable=self.evaluation_context_variable,
145     -           )
      123+          self.prompts = log_with_evaluation_context(client=self.prompts)
      124+          self.prompts = prompt_call_evaluation_aware(client=self.prompts)
      125+          self.flows = log_with_evaluation_context(client=self.flows)
146   126
147   127           if opentelemetry_tracer_provider is not None:
148   128               self._tracer_provider = opentelemetry_tracer_provider
@@ -157,9 +137,8 @@ def __init__(
157   137           instrument_provider(provider=self._tracer_provider)
158   138           self._tracer_provider.add_span_processor(
159   139               HumanloopSpanProcessor(
160     -                   exporter=HumanloopSpanExporter(
161     -                       client=self,
162     -                   )
      140+                  client=self,
      141+                  exporter=HumanloopSpanExporter(client=self),
163   142               ),
164   143           )
165   144
0 commit comments