Commit 45f9a55

fix: lift tool error handling to handle guardrails (#747)
1 parent 5c1fa83 commit 45f9a55

6 files changed

Lines changed: 147 additions & 160 deletions

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "uipath-langchain"
-version = "0.9.16"
+version = "0.9.17"
 description = "Python SDK that enables developers to build and deploy LangGraph agents to the UiPath Cloud Platform"
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.11"

src/uipath_langchain/agent/react/agent.py

Lines changed: 10 additions & 5 deletions
@@ -63,7 +63,7 @@ def create_agent(
 
     Control flow tools (end_execution, raise_error) are auto-injected alongside regular tools.
     """
-    from ..tools import create_tool_node
+    from ..tools import create_tool_node, wrap_tools_with_error_handling
 
     if config is None:
         config = AgentGraphConfig()
@@ -76,9 +76,7 @@ def create_agent(
 
     init_node = create_init_node(messages, input_schema, config.is_conversational)
 
-    tool_nodes = create_tool_node(
-        agent_tools, handle_tool_errors=config.is_conversational
-    )
+    tool_nodes = create_tool_node(agent_tools)
 
     # for conversational agents we transform deeprag's citation format into cas's
     if config.is_conversational:
@@ -91,6 +89,13 @@ def create_agent(
     tool_nodes_with_guardrails = create_tools_guardrails_subgraph(
         tool_nodes, guardrails, input_schema=input_schema
     )
+
+    processed_tool_nodes = tool_nodes_with_guardrails
+    if config.is_conversational:
+        processed_tool_nodes = wrap_tools_with_error_handling(
+            tool_nodes_with_guardrails
+        )
+
     terminate_node = create_terminate_node(output_schema, config.is_conversational)
 
     CompleteAgentGraphState = create_state_with_input(
@@ -107,7 +112,7 @@ def create_agent(
     )
     builder.add_node(AgentGraphNode.INIT, init_with_guardrails_subgraph)
 
-    for tool_name, tool_node in tool_nodes_with_guardrails.items():
+    for tool_name, tool_node in processed_tool_nodes.items():
         builder.add_node(tool_name, tool_node)
 
     terminate_with_guardrails_subgraph = create_agent_terminate_guardrails_subgraph(
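
Reading order of the new wiring: the guardrails subgraphs are composed first, and only then (for conversational agents) wrapped with error handling, so a failure raised inside a guardrail is also converted into an error tool message instead of aborting the run. Below is a minimal standalone sketch of that ordering, not the SDK's code; the names wrap_with_error_handling, guardrail_check, run_tool, and composed_node are made up for illustration.

from typing import Callable


def wrap_with_error_handling(node: Callable[[dict], dict]) -> Callable[[dict], dict]:
    """Catch any failure inside the composed node and turn it into an error payload."""

    def wrapped(state: dict) -> dict:
        try:
            return node(state)
        except Exception as e:  # the real wrapper re-raises GraphBubbleUp first
            return {"messages": [{"status": "error", "content": str(e)}]}

    return wrapped


def guardrail_check(state: dict) -> None:
    # Hypothetical guardrail: reject a forbidden argument before the tool runs.
    if state.get("arg") == "forbidden":
        raise ValueError("guardrail violation")


def run_tool(state: dict) -> dict:
    return {"messages": [{"status": "success", "content": f"ran with {state['arg']}"}]}


def composed_node(state: dict) -> dict:
    # Guardrails + tool execution, analogous to the tools guardrails subgraph.
    guardrail_check(state)
    return run_tool(state)


# Wrapping the composed node converts guardrail failures into error messages too;
# wrapping only run_tool (the old handle_tool_errors behaviour) would let them
# escape and fail the whole graph run.
safe_node = wrap_with_error_handling(composed_node)
print(safe_node({"arg": "forbidden"}))

Lifting the wrapper outside the guardrail composition, rather than passing handle_tool_errors into create_tool_node as before, is what lets guardrail errors take the same path as tool errors.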

src/uipath_langchain/agent/tools/__init__.py

Lines changed: 7 additions & 1 deletion
@@ -13,7 +13,12 @@
 from .tool_factory import (
     create_tools_from_resources,
 )
-from .tool_node import ToolWrapperMixin, UiPathToolNode, create_tool_node
+from .tool_node import (
+    ToolWrapperMixin,
+    UiPathToolNode,
+    create_tool_node,
+    wrap_tools_with_error_handling,
+)
 
 __all__ = [
     "create_tools_from_resources",
@@ -28,4 +33,5 @@
     "fetch_entity_schemas",
     "UiPathToolNode",
     "ToolWrapperMixin",
+    "wrap_tools_with_error_handling",
 ]
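
With the re-export above, downstream code can import the new helper from the tools package; a one-line example, assuming the src/ layout maps to the installed uipath_langchain distribution:

from uipath_langchain.agent.tools import wrap_tools_with_error_handling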

src/uipath_langchain/agent/tools/tool_node.py

Lines changed: 96 additions & 72 deletions
@@ -2,7 +2,7 @@
 
 from collections.abc import Sequence
 from inspect import signature
-from typing import Any, Awaitable, Callable, Literal
+from typing import Any, Awaitable, Callable, Literal, Mapping
 
 from langchain_core.messages.tool import ToolCall, ToolMessage
 from langchain_core.tools import BaseTool
@@ -68,13 +68,11 @@ def __init__(
         tool: BaseTool,
         wrapper: ToolWrapperType | None = None,
         awrapper: AsyncToolWrapperType | None = None,
-        handle_tool_errors: bool = False,
     ):
         super().__init__(func=self._func, afunc=self._afunc, name=tool.name)
         self.tool = tool
         self.wrapper = wrapper
         self.awrapper = awrapper
-        self.handle_tool_errors = handle_tool_errors
 
     def _func(self, state: AgentGraphState) -> OutputType:
         call = self._extract_tool_call(state)
@@ -90,28 +88,16 @@ def _func(self, state: AgentGraphState) -> OutputType:
             # tool confirmation rejected
             return self._process_result(call, conversational_confirmation.cancelled)
 
-        try:
-            if self.wrapper:
-                inputs = self._prepare_wrapper_inputs(
-                    self.wrapper, self.tool, call, state
-                )
-                result = self.wrapper(*inputs)
-            else:
-                result = self.tool.invoke(call)
-            output = self._process_result(call, result)
-            if conversational_confirmation:
-                # HITL approved - apply confirmation metadata to tool result message
-                conversational_confirmation.annotate_result(output)
-            return output
-        except GraphBubbleUp:
-            # LangGraph uses exceptions for interrupt control flow — re-raise so
-            # handle_tool_errors doesn't swallow expected interrupts as errors.
-            # https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/
-            raise
-        except Exception as e:
-            if self.handle_tool_errors:
-                return self._process_error_result(call, e)
-            raise
+        if self.wrapper:
+            inputs = self._prepare_wrapper_inputs(self.wrapper, self.tool, call, state)
+            result = self.wrapper(*inputs)
+        else:
+            result = self.tool.invoke(call)
+        output = self._process_result(call, result)
+        if conversational_confirmation:
+            # HITL approved - apply confirmation metadata to tool result message
+            conversational_confirmation.annotate_result(output)
+        return output
 
     async def _afunc(self, state: AgentGraphState) -> OutputType:
         call = self._extract_tool_call(state)
@@ -127,29 +113,16 @@ async def _afunc(self, state: AgentGraphState) -> OutputType:
             # tool confirmation rejected
             return self._process_result(call, conversational_confirmation.cancelled)
 
-        try:
-            if self.awrapper:
-                inputs = self._prepare_wrapper_inputs(
-                    self.awrapper, self.tool, call, state
-                )
-
-                result = await self.awrapper(*inputs)
-            else:
-                result = await self.tool.ainvoke(call)
-            output = self._process_result(call, result)
-            if conversational_confirmation:
-                # HITL approved - apply confirmation metadata to tool result message
-                conversational_confirmation.annotate_result(output)
-            return output
-        except GraphBubbleUp:
-            # LangGraph uses exceptions for interrupt control flow — re-raise so
-            # handle_tool_errors doesn't swallow expected interrupts as errors.
-            # https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/
-            raise
-        except Exception as e:
-            if self.handle_tool_errors:
-                return self._process_error_result(call, e)
-            raise
+        if self.awrapper:
+            inputs = self._prepare_wrapper_inputs(self.awrapper, self.tool, call, state)
+            result = await self.awrapper(*inputs)
+        else:
+            result = await self.tool.ainvoke(call)
+        output = self._process_result(call, result)
+        if conversational_confirmation:
+            # HITL approved - apply confirmation metadata to tool result message
+            conversational_confirmation.annotate_result(output)
+        return output
 
     def _extract_tool_call(self, state: AgentGraphState) -> ToolCall | None:
         """Extract the tool call from the state messages."""
@@ -171,16 +144,6 @@ def _extract_tool_call(self, state: AgentGraphState) -> ToolCall | None:
 
         return latest_ai_message.tool_calls[current_tool_call_index]
 
-    def _process_error_result(self, call: ToolCall, error: Exception) -> OutputType:
-        """Handle tool execution errors by creating an error ToolMessage."""
-        error_message = ToolMessage(
-            content=str(error),
-            name=call["name"],
-            tool_call_id=call["id"],
-            status="error",
-        )
-        return {"messages": [error_message]}
-
     def _process_result(
         self, call: ToolCall, result: dict[str, Any] | Command[Any] | ToolMessage | None
     ) -> OutputType:
@@ -242,6 +205,78 @@ def _filter_state(
         return model_type.model_validate(state, from_attributes=True)
 
 
+def _get_tool_error_result(
+    e: Exception, state: AgentGraphState, tool_name: str
+) -> OutputType | None:
+    """Build an error ToolMessage for the current tool call, or return None to re-raise."""
+    latest_ai_message = find_latest_ai_message(state.messages)
+    if latest_ai_message is None:
+        return None
+    try:
+        idx = extract_current_tool_call_index(state.messages, tool_name)
+    except Exception:
+        return None
+    if idx is None:
+        return None
+    call = latest_ai_message.tool_calls[idx]
+    return {
+        "messages": [
+            ToolMessage(
+                content=str(e),
+                name=call["name"],
+                tool_call_id=call["id"],
+                status="error",
+            )
+        ]
+    }
+
+
+def wrap_tools_with_error_handling(
+    tool_nodes: Mapping[str, RunnableCallable],
+) -> dict[str, RunnableCallable]:
+    """Wrap tool nodes to catch errors and return them as ToolMessages, rather than failing the entire graph execution."""
+    return {
+        tool_name: _wrap_tool_error_handling(tool_node, tool_name)
+        for tool_name, tool_node in tool_nodes.items()
+    }
+
+
+def _wrap_tool_error_handling(
+    tool_node: RunnableCallable,
+    tool_name: str,
+) -> RunnableCallable:
+    """Wrap a tool node to catch errors and return them as ToolMessages, rather than failing the entire graph execution.
+
+    Catch and re-raise GraphBubbleUp, since LangGraph uses exceptions for interrupt control flow.
+    This is so we don't swallow expected interrupts as tool errors.
+    (https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/)
+    """
+
+    def _func(state: AgentGraphState) -> OutputType:
+        try:
+            return tool_node.invoke(state)
+        except GraphBubbleUp:
+            raise
+        except Exception as e:
+            result = _get_tool_error_result(e, state, tool_name)
+            if result is None:
+                raise
+            return result
+
+    async def _afunc(state: AgentGraphState) -> OutputType:
+        try:
+            return await tool_node.ainvoke(state)
+        except GraphBubbleUp:
+            raise
+        except Exception as e:
+            result = _get_tool_error_result(e, state, tool_name)
+            if result is None:
+                raise
+            return result
+
+    return RunnableCallable(func=_func, afunc=_afunc, name=tool_name)
+
+
 class ToolWrapperMixin:
     wrapper: ToolWrapperType | None = None
     awrapper: AsyncToolWrapperType | None = None
@@ -256,23 +291,15 @@ def set_tool_wrappers(
         self.awrapper = awrapper
 
 
-def create_tool_node(
-    tools: Sequence[BaseTool], handle_tool_errors: bool = False
-) -> dict[str, UiPathToolNode]:
+def create_tool_node(tools: Sequence[BaseTool]) -> dict[str, UiPathToolNode]:
     """Create individual ToolNode for each tool.
 
     Args:
        tools: Sequence of tools to create nodes for.
-       handle_tool_errors: If True, catch tool execution errors and return them as error ToolMessages
-           instead of letting exceptions propagate.
 
     Returns:
-        Dict mapping tool.name -> ReactToolNode([tool]).
+        Dict mapping tool.name -> UiPathToolNode.
        Each tool gets its own dedicated node for middleware composition.
-
-    Note:
-        handle_tool_errors=False delegates error handling to LangGraph's error boundary.
-        handle_tool_errors=True will cause errors to be caught and converted to ToolMessages with status="error".
     """
     dict_mapping: dict[str, UiPathToolNode] = {}
     for tool in tools:
@@ -281,10 +308,7 @@ def create_tool_node(
                 tool,
                 wrapper=tool.wrapper,
                 awrapper=tool.awrapper,
-                handle_tool_errors=handle_tool_errors,
             )
         else:
-            dict_mapping[tool.name] = UiPathToolNode(
-                tool, wrapper=None, awrapper=None, handle_tool_errors=handle_tool_errors
-            )
+            dict_mapping[tool.name] = UiPathToolNode(tool, wrapper=None, awrapper=None)
     return dict_mapping
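
For reference, the state update that _get_tool_error_result returns has the same shape as a normal tool result, just with status="error". A self-contained sketch of that payload, assuming langchain_core is installed; the tool name, arguments, and call id are invented values:

from langchain_core.messages.tool import ToolMessage

call = {"name": "lookup_weather", "args": {"city": "Oslo"}, "id": "call_123"}

try:
    raise RuntimeError("upstream API returned 503")
except Exception as e:
    # Mirror the error update built by _get_tool_error_result above.
    error_update = {
        "messages": [
            ToolMessage(
                content=str(e),
                name=call["name"],
                tool_call_id=call["id"],
                status="error",
            )
        ]
    }

print(error_update["messages"][0].status)   # "error"
print(error_update["messages"][0].content)  # "upstream API returned 503"

Because the wrapper locates the failing call via find_latest_ai_message and extract_current_tool_call_index, it re-raises when no matching tool call can be found in the state rather than inventing a tool_call_id.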
