diff --git a/src/uipath_langchain/agent/exceptions/__init__.py b/src/uipath_langchain/agent/exceptions/__init__.py new file mode 100644 index 00000000..3336c908 --- /dev/null +++ b/src/uipath_langchain/agent/exceptions/__init__.py @@ -0,0 +1,6 @@ +from .exceptions import AgentNodeRoutingException, AgentTerminationException + +__all__ = [ + "AgentNodeRoutingException", + "AgentTerminationException", +] diff --git a/src/uipath_langchain/agent/react/exceptions.py b/src/uipath_langchain/agent/exceptions/exceptions.py similarity index 100% rename from src/uipath_langchain/agent/react/exceptions.py rename to src/uipath_langchain/agent/exceptions/exceptions.py diff --git a/src/uipath_langchain/agent/guardrails/__init__.py b/src/uipath_langchain/agent/guardrails/__init__.py new file mode 100644 index 00000000..9e1aff43 --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/__init__.py @@ -0,0 +1,13 @@ +from .guardrail_nodes import create_llm_guardrail_node, create_tool_guardrail_node, create_agent_guardrail_node +from .guardrails_subgraph import create_llm_guardrails_subgraph, create_agent_guardrails_subgraph, \ + create_tool_guardrails_subgraph + +__all__ = [ + "create_llm_guardrails_subgraph", + "create_agent_guardrails_subgraph", + "create_tool_guardrails_subgraph", + "create_llm_guardrail_node", + "create_agent_guardrail_node", + "create_tool_guardrail_node", +] + diff --git a/src/uipath_langchain/agent/guardrails/actions/__init__.py b/src/uipath_langchain/agent/guardrails/actions/__init__.py new file mode 100644 index 00000000..6fd77667 --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/actions/__init__.py @@ -0,0 +1,14 @@ +from .base_action import GuardrailAction +from .block_action import BlockAction +from .escalate_action import EscalateAction +from .log_action import LogAction + +__all__ = [ + "GuardrailAction", + "BlockAction", + "LogAction", + "EscalateAction", +] + + + diff --git a/src/uipath_langchain/agent/guardrails/actions/base_action.py 
b/src/uipath_langchain/agent/guardrails/actions/base_action.py new file mode 100644 index 00000000..addf417e --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/actions/base_action.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Literal, Tuple, Callable, Any + +from uipath.platform.guardrails import CustomGuardrail, BuiltInValidatorGuardrail, GuardrailScope + +from ..types import AgentGuardrailsGraphState + + +class GuardrailAction(ABC): + """Extensible action interface producing a node for validation failure.""" + + @abstractmethod + def action_node( + self, + *, + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + scope: GuardrailScope, + execution_stage: Literal["PreExecution", "PostExecution"], + ) -> GuardrailActionNode: + """Create and return the GraphNode to execute on validation failure.""" + ... + + +GuardrailActionNode = Tuple[str, Callable[[AgentGuardrailsGraphState], Any]] diff --git a/src/uipath_langchain/agent/guardrails/actions/block_action.py b/src/uipath_langchain/agent/guardrails/actions/block_action.py new file mode 100644 index 00000000..bd160a03 --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/actions/block_action.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +import re +from typing import Literal, Dict, Any + +from uipath.platform.guardrails import CustomGuardrail, BuiltInValidatorGuardrail, GuardrailScope +from uipath.runtime.errors import UiPathErrorCode, UiPathErrorCategory + +from ..types import AgentGuardrailsGraphState +from .base_action import GuardrailAction, GuardrailActionNode +from ...exceptions import AgentTerminationException + + +class BlockAction(GuardrailAction): + """Action that terminates execution when a guardrail fails. + + Args: + reason: Optional reason string to include in the raised exception title. 
+ """ + + def __init__(self, reason: str) -> None: + self.reason = reason + + def action_node( + self, + *, + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + scope: GuardrailScope, + execution_stage: Literal["PreExecution", "PostExecution"], + ) -> GuardrailActionNode: + sanitized = re.sub(r"\W+", "_", getattr(guardrail, "name", "guardrail")).strip( + "_" + ) + node_name = f"{sanitized}_{execution_stage.lower()}_{scope.lower()}_block" + + async def _node(_state: AgentGuardrailsGraphState) -> Dict[str, Any]: + raise AgentTerminationException( + code=UiPathErrorCode.EXECUTION_ERROR, + title="Guardrail violation", + detail=self.reason, + category=UiPathErrorCategory.USER, + ) + + return node_name, _node diff --git a/src/uipath_langchain/agent/guardrails/actions/escalate_action.py b/src/uipath_langchain/agent/guardrails/actions/escalate_action.py new file mode 100644 index 00000000..5440858c --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/actions/escalate_action.py @@ -0,0 +1,240 @@ +from __future__ import annotations + +import json +import re +from typing import Any, Dict, Literal + +from langchain_core.messages import AIMessage +from langgraph.types import Command, interrupt +from uipath.platform.common import CreateEscalation +from uipath.platform.guardrails import ( + BuiltInValidatorGuardrail, + CustomGuardrail, + GuardrailScope, +) +from uipath.runtime.errors import UiPathErrorCode + +from ...exceptions import AgentTerminationException +from ..guardrail_nodes import _message_text +from ..types import AgentGuardrailsGraphState +from .base_action import GuardrailAction, GuardrailActionNode + + +class EscalateAction(GuardrailAction): + """Node-producing action that inserts a HITL interruption node into the graph. + + The returned node triggers a dynamic interrupt for HITL without re-evaluating. + The runtime will persist a resume trigger and suspend execution. 
+ """ + + def __init__( + self, + app_name: str, + app_folder_path: str, + title: str, + version: int, + assignee: str, + ): + self.app_name = app_name + self.app_folder_path = app_folder_path + self.title = title + self.version = version + self.assignee = assignee + + def action_node( + self, + *, + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + scope: GuardrailScope, + execution_stage: Literal["PreExecution", "PostExecution"], + ) -> GuardrailActionNode: + sanitized = re.sub(r"\W+", "_", guardrail.name).strip("_").lower() + node_name = f"{sanitized}_hitl_{execution_stage.lower()}_{scope.lower()}" + + async def _node(state: AgentGuardrailsGraphState) -> Dict[str, Any]: + payload = _extract_escalation_content(state, scope, execution_stage) + tool_field = _hook_type_to_tool_field(execution_stage) + data = { + "GuardrailName": guardrail.name, + "GuardrailDescription": guardrail.description, + "TenantName": "AgentsRuntime", + "AgentTrace": "https://alpha.uipath.com/f88fa028-ccdd-4b5f-bee4-01ef94d134d8/studio_/designer/48fff406-52e9-4a37-ba66-76c0212d9c6b", + "Tool": "Create_Issue", + "ExecutionStage": execution_stage, + "GuardrailResult": state.guardrail_validation_result, + tool_field: payload, + } + escalation_result = interrupt( + CreateEscalation( + app_name=self.app_name, + app_folder_path=self.app_folder_path, + title=self.title, + data=data, + app_version=self.version, + assignee=self.assignee, + ) + ) + + if escalation_result.action == "Approve": + return _process_escalation_response( + state, escalation_result.data, scope, execution_stage + ) + + raise AgentTerminationException( + code=UiPathErrorCode.CREATE_RESUME_TRIGGER_ERROR, + title="Escalation rejected", + detail="Escalation rejected", + ) + + return node_name, _node + + +def _process_escalation_response( + state: AgentGuardrailsGraphState, + escalation_result: Dict[str, Any], + scope: GuardrailScope, + hook_type: Literal["PreExecution", "PostExecution"], +) -> Dict[str, Any] | Command: + """Process 
escalation response and update state based on guardrail scope. + + Args: + state: The current agent graph state. + escalation_result: The result from the escalation interrupt. + scope: The guardrail scope (LLM/AGENT/TOOL). + hook_type: The hook type ("PreExecution" or "PostExecution"). + + Returns: + For LLM scope: Command to update messages with reviewed inputs/outputs. + For non-LLM scope: Empty dict (no message alteration). + + Raises: + AgentTerminationException: If escalation response processing fails. + """ + if scope != GuardrailScope.LLM: + return {} + + try: + reviewed_field = ( + "ReviewedInputs" if hook_type == "PreExecution" else "ReviewedOutputs" + ) + + msgs = state.messages.copy() + if not msgs or reviewed_field not in escalation_result: + return {} + + last_message = msgs[-1] + + if hook_type == "PreExecution": + reviewed_content = escalation_result[reviewed_field] + if reviewed_content: + last_message.content = json.loads(reviewed_content) + else: + reviewed_outputs_json = escalation_result[reviewed_field] + if not reviewed_outputs_json: + return {} + + content_list = json.loads(reviewed_outputs_json) + if not content_list: + return {} + + ai_message: AIMessage = last_message # type: ignore[assignment] + content_index = 0 + + if ai_message.tool_calls: + tool_calls = list(ai_message.tool_calls) + for tool_call in tool_calls: + args = ( + tool_call["args"] + if isinstance(tool_call, dict) + else tool_call.args + ) + if ( + isinstance(args, dict) + and "content" in args + and args["content"] is not None + ): + if content_index < len(content_list): + updated_content = json.loads(content_list[content_index]) + args["content"] = updated_content + if isinstance(tool_call, dict): + tool_call["args"] = args + else: + tool_call.args = args + content_index += 1 + ai_message.tool_calls = tool_calls + + if len(content_list) > content_index: + ai_message.content = content_list[-1] + + return Command(update={"messages": msgs}) + except Exception as e: + raise 
AgentTerminationException( + code=UiPathErrorCode.EXECUTION_ERROR, + title="Escalation rejected", + detail=str(e) + ) from e + + +def _extract_escalation_content( + state: AgentGuardrailsGraphState, + scope: GuardrailScope, + hook_type: Literal["PreExecution", "PostExecution"], +) -> str: + """Extract escalation content from state based on guardrail scope and hook type. + + Args: + state: The current agent graph state. + scope: The guardrail scope (LLM/AGENT/TOOL). + hook_type: The hook type ("PreExecution" or "PostExecution"). + + Returns: + For non-LLM scope: Empty string. + For LLM PreExecution: JSON string with message content. + For LLM PostExecution: JSON array with tool call content and message content. + """ + if scope != GuardrailScope.LLM: + return "" + + if not state.messages: + raise AgentTerminationException( + code=UiPathErrorCode.EXECUTION_ERROR, + title="Invalid state message", + ) + + last_message = state.messages[-1] + if hook_type == "PreExecution": + content = _message_text(last_message) + return json.dumps(content) if content else "" + + ai_message: AIMessage = last_message # type: ignore[assignment] + content_list: list[str] = [] + + if ai_message.tool_calls: + for tool_call in ai_message.tool_calls: + args = tool_call["args"] if isinstance(tool_call, dict) else tool_call.args + if ( + isinstance(args, dict) + and "content" in args + and args["content"] is not None + ): + content_list.append(json.dumps(args["content"])) + + message_content = _message_text(last_message) + if message_content: + content_list.append(message_content) + + return json.dumps(content_list) + + +def _hook_type_to_tool_field( + hook_type: Literal["PreExecution", "PostExecution"], +) -> str: + """Convert hook type to tool field name. + + Args: + hook_type: The hook type ("PreExecution" or "PostExecution"). + + Returns: + "ToolInputs" for "PreExecution", "ToolOutputs" for "PostExecution". 
+ """ + return "ToolInputs" if hook_type == "PreExecution" else "ToolOutputs" diff --git a/src/uipath_langchain/agent/guardrails/actions/log_action.py b/src/uipath_langchain/agent/guardrails/actions/log_action.py new file mode 100644 index 00000000..fe2dcab7 --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/actions/log_action.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +import logging +import re +from typing import Literal, Dict, Any + +from uipath.platform.guardrails import CustomGuardrail, BuiltInValidatorGuardrail, GuardrailScope + +from .base_action import GuardrailAction, GuardrailActionNode +from ..types import AgentGuardrailsGraphState + + +class LogAction(GuardrailAction): + """Action that logs guardrail violations and continues.""" + + def __init__(self, level: int = logging.WARNING) -> None: + self.level = level + + def action_node( + self, + *, + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + scope: GuardrailScope, + execution_stage: Literal["PreExecution", "PostExecution"], + ) -> GuardrailActionNode: + sanitized = re.sub(r"\W+", "_", getattr(guardrail, "name", "guardrail")).strip( + "_" + ) + node_name = f"{sanitized}_{execution_stage.lower()}_{scope.lower()}_log" + + # TODO: add complete implementation for Log action + async def _node(_state: AgentGuardrailsGraphState) -> Dict[str, Any]: + logging.getLogger(__name__).log( + self.level, + "Guardrail '%s' failed at %s %s: %s", + guardrail.name, + execution_stage, + scope.value if hasattr(scope, "value") else str(scope), + _state.guardrail_validation_result, + ) + return {} + + return node_name, _node diff --git a/src/uipath_langchain/agent/guardrails/guardrail_nodes.py b/src/uipath_langchain/agent/guardrails/guardrail_nodes.py new file mode 100644 index 00000000..dce33e5d --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/guardrail_nodes.py @@ -0,0 +1,124 @@ +from __future__ import annotations + +import logging +import re +from typing import Any, Callable, Dict, Literal, Tuple + +from 
langchain_core.messages import AnyMessage, HumanMessage, SystemMessage +from langgraph.types import Command +from uipath.platform import UiPath +from uipath.platform.guardrails import ( + BuiltInValidatorGuardrail, + CustomGuardrail, + GuardrailScope, +) + +from .types import AgentGuardrailsGraphState + +logger = logging.getLogger(__name__) + + +def _message_text(msg: AnyMessage) -> str: + if isinstance(msg, (HumanMessage, SystemMessage)): + return msg.content if isinstance(msg.content, str) else str(msg.content) + return str(getattr(msg, "content", "")) if hasattr(msg, "content") else "" + + +def _create_guardrail_node( + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + scope: GuardrailScope, + execution_stage: Literal["PreExecution", "PostExecution"], + payload_generator: Callable[[AgentGuardrailsGraphState], str], + success_node: str, + failure_node: str, +) -> Tuple[str, Callable[[AgentGuardrailsGraphState], Any]]: + """Private factory for guardrail evaluation nodes. + + Returns a node that evaluates the guardrail and routes via Command: + - goto success_node on validation pass + - goto failure_node on validation fail + """ + raw_node_name = f"{scope.lower()}_{execution_stage}_{guardrail.name}" + node_name = re.sub(r"\W+", "_", raw_node_name).strip("_").lower() + + async def node(state: AgentGuardrailsGraphState) -> Dict[str, Any] | Command: + text = payload_generator(state) + try: + uipath = UiPath() + result = uipath.guardrails.evaluate_guardrail(text, guardrail) + except Exception as exc: + logger.error("Failed to evaluate guardrail: %s", exc) + raise + + if not result.validation_passed: + return Command( + goto=failure_node, update={"guardrail_validation_result": result.reason} + ) + return Command(goto=success_node, update={"guardrail_validation_result": None}) + + return node_name, node + + +def create_llm_guardrail_node( + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + execution_stage: Literal["PreExecution", "PostExecution"], + 
success_node: str, + failure_node: str, +) -> Tuple[str, Callable[[AgentGuardrailsGraphState], Any]]: + def _payload_generator(state: AgentGuardrailsGraphState) -> str: + if not state.messages: + return "" + return _message_text(state.messages[-1]) + + return _create_guardrail_node( + guardrail, + GuardrailScope.LLM, + execution_stage, + _payload_generator, + success_node, + failure_node, + ) + + +def create_agent_guardrail_node( + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + execution_stage: Literal["PreExecution", "PostExecution"], + success_node: str, + failure_node: str, +) -> Tuple[str, Callable[[AgentGuardrailsGraphState], Any]]: + # To be implemented in future PR + def _payload_generator(state: AgentGuardrailsGraphState) -> str: + if not state.messages: + return "" + return _message_text(state.messages[-1]) + + return _create_guardrail_node( + guardrail, + GuardrailScope.AGENT, + execution_stage, + _payload_generator, + success_node, + failure_node, + ) + + +def create_tool_guardrail_node( + guardrail: CustomGuardrail | BuiltInValidatorGuardrail, + execution_stage: Literal["PreExecution", "PostExecution"], + success_node: str, + failure_node: str, +) -> Tuple[str, Callable[[AgentGuardrailsGraphState], Any]]: + # To be implemented in future PR + def _payload_generator(state: AgentGuardrailsGraphState) -> str: + if not state.messages: + return "" + return _message_text(state.messages[-1]) + + return _create_guardrail_node( + guardrail, + GuardrailScope.TOOL, + execution_stage, + _payload_generator, + success_node, + failure_node, + ) diff --git a/src/uipath_langchain/agent/guardrails/guardrails_subgraph.py b/src/uipath_langchain/agent/guardrails/guardrails_subgraph.py new file mode 100644 index 00000000..2addb6b5 --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/guardrails_subgraph.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +from typing import Any, Callable, Literal, Sequence + +from langgraph.constants import END, START 
+from langgraph.graph import StateGraph +from uipath.agent.models.agent import AgentGuardrail +from uipath.platform.guardrails import GuardrailScope + +from .guardrail_nodes import ( + create_agent_guardrail_node, + create_llm_guardrail_node, + create_tool_guardrail_node, +) +from .actions.base_action import GuardrailAction, GuardrailActionNode +from .types import AgentGuardrailsGraphState + + +def create_guardrails_subgraph( + main_inner_node: tuple[str, Any], + guardrails: Sequence[tuple[AgentGuardrail, GuardrailAction]] | None, + scope: GuardrailScope, + node_factory: Callable[ + [ + AgentGuardrail, + Literal["PreExecution", "PostExecution"], + str, # success node name + str, # fail node name + ], + GuardrailActionNode, + ] = create_llm_guardrail_node, +) -> Any: + """Build a subgraph that enforces guardrails around an inner node. + + START -> pre-eval nodes (dynamic goto) -> inner -> post-eval nodes (dynamic goto) -> END + + No static edges are added between guardrail nodes; each eval decides via Command. + Failure nodes are added but not chained; they are expected to route via Command. 
+ """ + inner_name, inner_node = main_inner_node + + subgraph = StateGraph(AgentGuardrailsGraphState) + + # Add pre execution guardrail nodes + first_pre_exec_guardrail_node = _build_guardrail_node_chain( + subgraph, guardrails, scope, "PreExecution", node_factory, inner_name + ) + subgraph.add_edge(START, first_pre_exec_guardrail_node) + + # Add post execution guardrail nodes + first_post_exec_guardrail_node = _build_guardrail_node_chain( + subgraph, guardrails, scope, "PostExecution", node_factory, END + ) + subgraph.add_node(inner_name, inner_node) + subgraph.add_edge(inner_name, first_post_exec_guardrail_node) + + return subgraph.compile() + + +def _build_guardrail_node_chain( + subgraph: StateGraph, + guardrails: Sequence[tuple[AgentGuardrail, GuardrailAction]] | None, + scope: GuardrailScope, + execution_stage: Literal["PreExecution", "PostExecution"], + node_factory: Callable[ + [ + AgentGuardrail, + Literal["PreExecution", "PostExecution"], + str, # success node name + str, # fail node name + ], + GuardrailActionNode, + ], + next_node: str, +) -> str: + """Recursively build a chain of guardrail nodes in reverse order. + + This function processes guardrails from last to first, creating a chain where: + - Each guardrail node evaluates the guardrail condition + - On success, it routes to the next guardrail node (or the final next_node) + - On failure, it routes to a failure node that either throws an error or continues to next_node + + Args: + subgraph: The StateGraph to add nodes and edges to. + guardrails: Sequence of (guardrail, action) tuples to process. Processed in reverse. + scope: The scope of the guardrails (LLM, AGENT, or TOOL). + execution_stage: Whether this is "PreExecution" or "PostExecution" guardrails. + node_factory: Factory function to create guardrail evaluation nodes. + next_node: The node name to route to after all guardrails pass. + + Returns: + The name of the first guardrail node in the chain (or next_node if no guardrails). 
+ """ + # Base case: no guardrails to process, return the next node directly + if not guardrails: + return next_node + + guardrail, action = guardrails[-1] + remaining_guardrails = guardrails[:-1] + + fail_node_name, fail_node = action.action_node( + guardrail=guardrail, scope=scope, execution_stage=execution_stage + ) + + # Create the guardrail evaluation node. + guardrail_node_name, guardrail_node = node_factory( + guardrail, execution_stage, next_node, fail_node_name + ) + + # Add both nodes to the subgraph + subgraph.add_node(guardrail_node_name, guardrail_node) + subgraph.add_node(fail_node_name, fail_node) + + # Failure path route to the next node + subgraph.add_edge(fail_node_name, next_node) + + previous_node_name = _build_guardrail_node_chain( + subgraph, + remaining_guardrails, + scope, + execution_stage, + node_factory, + guardrail_node_name, + ) + + return previous_node_name + + +def create_llm_guardrails_subgraph( + llm_node: tuple[str, Any], + guardrails: Sequence[tuple[AgentGuardrail, GuardrailAction]] | None, +) -> Any: + applicable_guardrails = [ + (guardrail, _) + for (guardrail, _) in (guardrails or []) + if GuardrailScope.LLM in guardrail.selector.scopes + ] + return create_guardrails_subgraph( + main_inner_node=llm_node, + guardrails=applicable_guardrails, + scope=GuardrailScope.LLM, + node_factory=create_llm_guardrail_node, + ) + + +def create_agent_guardrails_subgraph( + agent_node: tuple[str, Any], + guardrails: Sequence[tuple[AgentGuardrail, GuardrailAction]] | None, +) -> Any: + applicable_guardrails = [ + (guardrail, _) + for (guardrail, _) in (guardrails or []) + if GuardrailScope.AGENT in guardrail.selector.scopes + ] + return create_guardrails_subgraph( + main_inner_node=agent_node, + guardrails=applicable_guardrails, + scope=GuardrailScope.AGENT, + node_factory=create_agent_guardrail_node, + ) + + +def create_tool_guardrails_subgraph( + tool_node: tuple[str, Any], + guardrails: Sequence[tuple[AgentGuardrail, GuardrailAction]] | None, 
+) -> Any: + tool_name, _ = tool_node + applicable_guardrails = [ + (guardrail, action) + for (guardrail, action) in (guardrails or []) + if GuardrailScope.TOOL in guardrail.selector.scopes + and tool_name in guardrail.selector.match_names + ] + return create_guardrails_subgraph( + main_inner_node=tool_node, + guardrails=applicable_guardrails, + scope=GuardrailScope.TOOL, + node_factory=create_tool_guardrail_node, + ) diff --git a/src/uipath_langchain/agent/guardrails/types.py b/src/uipath_langchain/agent/guardrails/types.py new file mode 100644 index 00000000..8403abd4 --- /dev/null +++ b/src/uipath_langchain/agent/guardrails/types.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from typing import Annotated, Optional + +from langchain_core.messages import AnyMessage +from langgraph.graph.message import add_messages +from pydantic import BaseModel + + +class AgentGuardrailsGraphState(BaseModel): + """Agent Guardrails Graph state for guardrail subgraph.""" + + messages: Annotated[list[AnyMessage], add_messages] = [] + guardrail_validation_result: Optional[str] = None diff --git a/src/uipath_langchain/agent/react/agent.py b/src/uipath_langchain/agent/react/agent.py index 9f8110b0..43db50a7 100644 --- a/src/uipath_langchain/agent/react/agent.py +++ b/src/uipath_langchain/agent/react/agent.py @@ -7,8 +7,11 @@ from langgraph.constants import END, START from langgraph.graph import StateGraph from pydantic import BaseModel +from uipath.agent.models.agent import AgentGuardrail from ..tools import create_tool_node +from ..guardrails.actions import GuardrailAction +from ..guardrails import create_llm_guardrails_subgraph from .init_node import ( create_init_node, ) @@ -48,8 +51,14 @@ def create_agent( input_schema: Type[InputT] | None = None, output_schema: Type[OutputT] | None = None, config: AgentGraphConfig | None = None, + guardrails: Sequence[tuple[AgentGuardrail, GuardrailAction]] | None = None, ) -> StateGraph[AgentGraphState, None, InputT, OutputT]: - 
"""Build agent graph with INIT -> AGENT <-> TOOLS loop, terminated by control flow tools. + """Build agent graph with INIT -> AGENT(subgraph) <-> TOOLS loop, terminated by control flow tools. + + The AGENT node is a subgraph that runs: + - before-agent guardrail middlewares + - the LLM tool-executing node + - after-agent guardrail middlewares Control flow tools (end_execution, raise_error) are auto-injected alongside regular tools. """ @@ -63,7 +72,6 @@ def create_agent( llm_tools: list[BaseTool] = [*agent_tools, *flow_control_tools] init_node = create_init_node(messages) - agent_node = create_llm_node(model, llm_tools) tool_nodes = create_tool_node(agent_tools) terminate_node = create_terminate_node(output_schema) @@ -75,7 +83,6 @@ def create_agent( InnerAgentGraphState, input_schema=input_schema, output_schema=output_schema ) builder.add_node(AgentGraphNode.INIT, init_node) - builder.add_node(AgentGraphNode.AGENT, agent_node) for tool_name, tool_node in tool_nodes.items(): builder.add_node(tool_name, tool_node) @@ -83,6 +90,12 @@ def create_agent( builder.add_node(AgentGraphNode.TERMINATE, terminate_node) builder.add_edge(START, AgentGraphNode.INIT) + + llm_node = create_llm_node(model, llm_tools) + llm_with_guardrails_subgraph = create_llm_guardrails_subgraph( + (AgentGraphNode.LLM, llm_node), guardrails + ) + builder.add_node(AgentGraphNode.AGENT, llm_with_guardrails_subgraph) builder.add_edge(AgentGraphNode.INIT, AgentGraphNode.AGENT) tool_node_names = list(tool_nodes.keys()) diff --git a/src/uipath_langchain/agent/react/router.py b/src/uipath_langchain/agent/react/router.py index 50abc509..893ae215 100644 --- a/src/uipath_langchain/agent/react/router.py +++ b/src/uipath_langchain/agent/react/router.py @@ -6,7 +6,7 @@ from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL from .constants import MAX_SUCCESSIVE_COMPLETIONS -from .exceptions import AgentNodeRoutingException +from ..exceptions import AgentNodeRoutingException from .types import 
AgentGraphNode, AgentGraphState from .utils import count_successive_completions diff --git a/src/uipath_langchain/agent/react/terminate_node.py b/src/uipath_langchain/agent/react/terminate_node.py index 93409a69..95a1e9d1 100644 --- a/src/uipath_langchain/agent/react/terminate_node.py +++ b/src/uipath_langchain/agent/react/terminate_node.py @@ -7,7 +7,7 @@ from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL from uipath.runtime.errors import UiPathErrorCode -from .exceptions import ( +from ..exceptions import ( AgentNodeRoutingException, AgentTerminationException, ) diff --git a/src/uipath_langchain/agent/react/types.py b/src/uipath_langchain/agent/react/types.py index 194369f0..560c9b7a 100644 --- a/src/uipath_langchain/agent/react/types.py +++ b/src/uipath_langchain/agent/react/types.py @@ -1,7 +1,7 @@ from __future__ import annotations from enum import StrEnum -from typing import Annotated +from typing import Annotated, Optional from langchain_core.messages import AnyMessage from langgraph.graph.message import add_messages @@ -14,9 +14,17 @@ class AgentGraphState(BaseModel): messages: Annotated[list[AnyMessage], add_messages] = [] +class AgentGuardrailsGraphState(BaseModel): + """Agent Guardrails Graph state for guardrail subgraph.""" + + messages: Annotated[list[AnyMessage], add_messages] = [] + guardrailResultReason: Optional[str] = None + + class AgentGraphNode(StrEnum): INIT = "init" AGENT = "agent" + LLM = "llm" TOOLS = "tools" TERMINATE = "terminate" diff --git a/tests/agent/guardrails/actions/test_block_action.py b/tests/agent/guardrails/actions/test_block_action.py new file mode 100644 index 00000000..ce9c7989 --- /dev/null +++ b/tests/agent/guardrails/actions/test_block_action.py @@ -0,0 +1,32 @@ +"""Tests for BlockAction guardrail failure behavior.""" + +import types + +import pytest +from uipath.platform.guardrails import GuardrailScope + +from uipath_langchain.agent.exceptions import AgentTerminationException +from 
uipath_langchain.agent.guardrails.actions.block_action import BlockAction + + +class TestBlockAction: + @pytest.mark.asyncio + async def test_node_name_and_exception_pre_llm(self): + """PreExecution + LLM: name is sanitized and node raises correct exception.""" + action = BlockAction(reason="Sensitive data detected") + guardrail = types.SimpleNamespace(name="My Guardrail (v1)") + + node_name, node = action.action_node( + guardrail=guardrail, + scope=GuardrailScope.LLM, + execution_stage="PreExecution", + ) + + assert node_name == "My_Guardrail_v1_preexecution_llm_block" + + with pytest.raises(AgentTerminationException) as excinfo: + await node(types.SimpleNamespace()) + + # The exception string is the provided reason + assert str(excinfo.value) == "Sensitive data detected" + diff --git a/tests/agent/guardrails/test_guardrail_nodes.py b/tests/agent/guardrails/test_guardrail_nodes.py new file mode 100644 index 00000000..b3b20f70 --- /dev/null +++ b/tests/agent/guardrails/test_guardrail_nodes.py @@ -0,0 +1,91 @@ +"""Tests for guardrail node creation and routing.""" + +import types + +import pytest +from langchain_core.messages import HumanMessage, SystemMessage + +from uipath_langchain.agent.guardrails.guardrail_nodes import ( + create_llm_guardrail_node, +) + + +class FakeGuardrails: + def __init__(self, result): + self._result = result + self.last_text = None + self.last_guardrail = None + + def evaluate_guardrail(self, text, guardrail): + self.last_text = text + self.last_guardrail = guardrail + return self._result + + +class FakeUiPath: + def __init__(self, result): + self.guardrails = FakeGuardrails(result) + + +def _patch_uipath(monkeypatch, validation_passed=True, reason=None): + result = types.SimpleNamespace(validation_passed=validation_passed, reason=reason) + fake = FakeUiPath(result) + monkeypatch.setattr( + "uipath_langchain.agent.guardrails.guardrail_nodes.UiPath", + lambda: fake, + ) + return fake + + +class TestLlmGuardrailNodes: + @pytest.mark.asyncio 
+ @pytest.mark.parametrize( + "execution_stage,expected_name", + [ + ("PreExecution", "llm_preexecution_example"), + ("PostExecution", "llm_postexecution_example"), + ], + ids=["pre-success", "post-success"], + ) + async def test_llm_success_pre_and_post( + self, monkeypatch, execution_stage, expected_name + ): + guardrail = types.SimpleNamespace(name="Example") + _patch_uipath(monkeypatch, validation_passed=True, reason=None) + node_name, node = create_llm_guardrail_node( + guardrail=guardrail, + execution_stage=execution_stage, + success_node="ok", + failure_node="nope", + ) + assert node_name == expected_name + state = types.SimpleNamespace(messages=[HumanMessage("payload")]) + cmd = await node(state) + assert getattr(cmd, "goto") == "ok" + assert getattr(cmd, "update") == {"guardrail_validation_result": None} + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "execution_stage,expected_name", + [ + ("PreExecution", "llm_preexecution_example"), + ("PostExecution", "llm_postexecution_example"), + ], + ids=["pre-fail", "post-fail"], + ) + async def test_llm_failure_pre_and_post( + self, monkeypatch, execution_stage, expected_name + ): + guardrail = types.SimpleNamespace(name="Example") + _patch_uipath(monkeypatch, validation_passed=False, reason="policy_violation") + node_name, node = create_llm_guardrail_node( + guardrail=guardrail, + execution_stage=execution_stage, + success_node="ok", + failure_node="nope", + ) + assert node_name == expected_name + state = types.SimpleNamespace(messages=[SystemMessage("payload")]) + cmd = await node(state) + assert getattr(cmd, "goto") == "nope" + assert getattr(cmd, "update") == {"guardrail_validation_result": "policy_violation"} diff --git a/tests/agent/guardrails/test_guardrails_subgraph.py b/tests/agent/guardrails/test_guardrails_subgraph.py new file mode 100644 index 00000000..d98381e3 --- /dev/null +++ b/tests/agent/guardrails/test_guardrails_subgraph.py @@ -0,0 +1,121 @@ +"""Tests for guardrails subgraph 
"""Tests for guardrails subgraph construction."""

import types

import pytest
from uipath.platform.guardrails import GuardrailScope

import uipath_langchain.agent.guardrails.guardrails_subgraph as mod


class FakeStateGraph:
    """Captures add_node/add_edge calls instead of building a real graph."""

    def __init__(self, _state_type):
        self._nodes = []
        self._edges = []

    def add_node(self, name, node):
        self._nodes.append((name, node))

    def add_edge(self, src, dst):
        self._edges.append((src, dst))

    def compile(self):
        # Expose the recorded topology for inspection by the tests.
        return types.SimpleNamespace(nodes=self._nodes, edges=self._edges)


def _fake_action(fail_prefix):
    """Build an action stub whose node name encodes prefix, stage and guardrail."""

    class _StubAction:
        def action_node(self, guardrail, scope, execution_stage):
            return (
                f"{fail_prefix}_{execution_stage}_{guardrail.name}",
                lambda state: state,
            )

    return _StubAction()


def _fake_factory(eval_prefix):
    """Build a node factory whose node name encodes prefix, stage and guardrail."""

    def _make(guardrail, execution_stage, success_node, failure_node):
        # The node callable itself is never invoked by these tests.
        return f"{eval_prefix}_{execution_stage}_{guardrail.name}", (lambda state: state)

    return _make


class TestGuardrailsSubgraph:
    def test_no_guardrails_creates_simple_edges(self, monkeypatch):
        """When no guardrails, edges should be: START -> inner -> END."""
        monkeypatch.setattr(mod, "StateGraph", FakeStateGraph)
        monkeypatch.setattr(mod, "START", "START")
        monkeypatch.setattr(mod, "END", "END")

        inner_node = ("inner", lambda state: state)
        graph = mod.create_guardrails_subgraph(
            main_inner_node=inner_node,
            guardrails=None,
            scope=GuardrailScope.LLM,
            node_factory=_fake_factory("eval"),
        )

        # Only the inner node is registered, wired straight from START to END.
        assert ("inner", inner_node[1]) in graph.nodes
        assert set(graph.edges) == {("START", "inner"), ("inner", "END")}

    def test_two_guardrails_build_chains_pre_and_post(self, monkeypatch):
        """Two guardrails should create reverse-ordered pre/post chains with failure edges."""
        monkeypatch.setattr(mod, "StateGraph", FakeStateGraph)
        monkeypatch.setattr(mod, "START", "START")
        monkeypatch.setattr(mod, "END", "END")

        # Guardrails g1 (first), g2 (second); builder processes last first.
        first = types.SimpleNamespace(name="guardrail1")
        second = types.SimpleNamespace(name="guardrail2")
        pairs = [(first, _fake_action("log")), (second, _fake_action("block"))]

        inner_node = ("inner", lambda state: state)
        graph = mod.create_guardrails_subgraph(
            main_inner_node=inner_node,
            guardrails=pairs,
            scope=GuardrailScope.LLM,
            node_factory=_fake_factory("eval"),
        )

        # Node names follow "<prefix>_<stage>_<guardrail>" per the fakes above.
        pre1 = "eval_PreExecution_guardrail1"
        pre1_fail = "log_PreExecution_guardrail1"
        pre2 = "eval_PreExecution_guardrail2"
        pre2_fail = "block_PreExecution_guardrail2"
        post1 = "eval_PostExecution_guardrail1"
        post1_fail = "log_PostExecution_guardrail1"
        post2 = "eval_PostExecution_guardrail2"
        post2_fail = "block_PostExecution_guardrail2"

        # Edge order is not guaranteed; compare as sets.
        required_edges = {
            ("START", pre1),        # pre-execution chain entry
            (pre1_fail, pre2),
            (pre2_fail, "inner"),
            ("inner", post1),       # inner hands off to the post-execution chain
            (post1_fail, post2),
            (post2_fail, "END"),    # post-execution failure routing to END
        }
        assert required_edges <= set(graph.edges)

        registered = {name for name, _ in graph.nodes}
        expected_nodes = {
            pre1,
            pre2,
            post1,
            post2,
            pre1_fail,
            pre2_fail,
            post1_fail,
            post2_fail,
            "inner",
        }
        assert expected_nodes <= registered