Skip to main content

LangChain Integration

Field: Value
Document ID: ASCEND-SDK-002
Version: 1.0.0
Last Updated: December 19, 2025
Author: Ascend Engineering Team
Publisher: OW-KAI Technologies Inc.
Classification: Enterprise Client Documentation
Compliance: SOC 2 CC6.1/CC6.2, PCI-DSS 7.1/8.3, HIPAA 164.312, NIST 800-53 AC-2/SI-4

Reading Time: 10 minutes | Skill Level: Intermediate

Overview

Integrate ASCEND governance with LangChain to control AI agent tool execution. Every tool invocation is evaluated against governance policies before execution.

Installation

pip install ascend-langchain langchain langchain-openai

Package Info: ascend-langchain on PyPI | Version: 1.0.0 | License: MIT

The ascend-langchain package includes the ASCEND SDK and provides:

  • AscendToolWrapper - Wrap any existing tool with governance
  • AscendCallbackHandler - Automatic governance for all agent actions
  • @governed decorator - Simple function-level governance
  • GovernedBaseTool - Base class for custom governed tools

Quick Start

Governed Tool Wrapper

import os
from typing import Optional, Type

from langchain.tools import BaseTool
from pydantic import BaseModel, Field

from ascend import AscendClient, AgentAction

class GovernedTool(BaseTool):
    """Base class for ASCEND-governed LangChain tools.

    Subclasses override ``_execute`` with the real tool logic; ``_run``
    submits the pending call to ASCEND first and only executes once the
    governance decision is an approval.
    """

    # Populated in __init__; annotated defaults so pydantic accepts them
    # as model fields on BaseTool.
    ascend_client: AscendClient = None
    action_type: str = "langchain.tool"
    tool_name: str = "unknown"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # One client per tool instance. Reads ASCEND_API_KEY from the
        # environment and fails fast (KeyError) if it is unset.
        self.ascend_client = AscendClient(
            api_key=os.environ["ASCEND_API_KEY"],
            agent_id="langchain-agent",
        )

    def _check_governance(self, input_data: str) -> bool:
        """Submit the tool call to ASCEND and block until it is approved.

        Raises:
            PermissionError: if the action is denied outright, or a
                pending approval is not granted within 60 seconds.
        """
        action = AgentAction(
            agent_id="langchain-agent",
            agent_name="LangChain Agent",
            action_type=self.action_type,
            # Truncate the input so the audit record stays compact.
            resource=f"{self.name}: {input_data[:100]}",
            tool_name=self.tool_name,
            action_details={"input": input_data},
        )

        result = self.ascend_client.submit_action(action)

        if result.is_denied():
            raise PermissionError(f"Tool execution denied: {result.reason}")

        if result.is_pending():
            # Human-in-the-loop path: wait up to 60s for a reviewer.
            final = self.ascend_client.wait_for_decision(
                result.action_id,
                timeout_ms=60000,
            )
            if not final.is_approved():
                raise PermissionError(f"Approval not granted: {final.reason}")

        return True

    def _run(self, query: str) -> str:
        # Governance gate first; _execute only runs on approval.
        self._check_governance(query)
        return self._execute(query)

    def _execute(self, query: str) -> str:
        """Override in subclass with actual tool logic."""
        raise NotImplementedError

SQL Query Tool

class GovernedSQLTool(GovernedTool):
    """SQL query tool with ASCEND governance."""

    # NOTE: type annotations are required for pydantic v2-based BaseTool;
    # bare `name = ...` class attributes are not registered as fields.
    name: str = "sql_query"
    description: str = "Execute SQL queries against the database"
    action_type: str = "database.query"
    tool_name: str = "postgresql"

    def _execute(self, query: str) -> str:
        """Run the (already-approved) query and return stringified rows."""
        # get_database_connection is provided by the host application.
        connection = get_database_connection()
        result = connection.execute(query)
        return str(result.fetchall())


# Usage with LangChain agent
from langchain.agents import AgentExecutor, create_react_agent
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4")
tools = [GovernedSQLTool()]

# The original snippet referenced an undefined `prompt`; define a minimal
# ReAct-style prompt (same shape as the Complete Example below) so the
# snippet runs as written.
prompt = PromptTemplate.from_template(
    "Answer the question using the available tools.\n"
    "Tools: {tools}\n"
    "Tool Names: {tool_names}\n"
    "Question: {input}\n"
    "Thought: {agent_scratchpad}"
)

agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools)

result = executor.invoke({"input": "How many users are in the database?"})

Governance Decorator

Simpler approach using a decorator:

import functools
from ascend import AscendClient, AgentAction

# Module-level singleton so every governed function shares one client.
_client = None

def get_ascend_client():
    """Lazily create and cache the shared :class:`AscendClient`.

    Returns:
        The process-wide AscendClient, created on first call.
    """
    global _client
    if _client is None:
        # Local import: this snippet does not import `os` at top level.
        import os
        _client = AscendClient(
            api_key=os.environ["ASCEND_API_KEY"],
            agent_id="langchain-agent",
            agent_name="LangChain Agent",
        )
    return _client

def governed(action_type: str, tool_name: str, risk_level: str = "medium"):
    """Decorator that gates a function behind an ASCEND governance check.

    Args:
        action_type: Governance action category, e.g. ``"database.query"``.
        tool_name: Logical tool identifier recorded with the action.
        risk_level: Risk indicator attached to the action (default "medium").

    Raises:
        PermissionError: (from the wrapper) when the action is denied, or a
            pending approval is not granted within 60 seconds.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            client = get_ascend_client()

            # Describe the call; truncate argument reprs so the audit
            # payload stays small.
            action = AgentAction(
                agent_id="langchain-agent",
                agent_name="LangChain Agent",
                action_type=action_type,
                resource=f"Execute {func.__name__}",
                tool_name=tool_name,
                action_details={
                    "function": func.__name__,
                    "args": str(args)[:200],
                    "kwargs": str(kwargs)[:200],
                },
                risk_indicators={"risk_level": risk_level},
            )

            result = client.submit_action(action)

            # Previously a pending action raised immediately; wait for the
            # decision instead, matching GovernedTool's behavior.
            if result.is_pending():
                result = client.wait_for_decision(
                    result.action_id,
                    timeout_ms=60000,
                )

            if not result.is_approved():
                raise PermissionError(f"Action denied: {result.reason}")

            return func(*args, **kwargs)
        return wrapper
    return decorator


# Usage
@governed("database.query", "postgresql", "medium")
def execute_sql(query: str) -> list:
    """Run *query* against the shared `db` handle once governance approves."""
    cursor = db.execute(query)
    return cursor.fetchall()

@governed("file.write", "filesystem", "high")
def write_file(path: str, content: str) -> None:
    """Write *content* to *path*; high-risk, so governance is checked first."""
    with open(path, 'w') as out:
        out.write(content)

Tool Factory

Create governed tools dynamically:

from langchain.tools import StructuredTool
from ascend import AscendClient, AgentAction

def create_governed_tool(
    name: str,
    description: str,
    func: callable,
    action_type: str,
    tool_name: str,
    risk_level: str = "medium",
):
    """Factory function to create governed LangChain tools.

    Wraps *func* so every invocation is submitted to ASCEND first. Unlike
    the decorator approach, denials are returned as strings (not raised),
    so the agent can relay the outcome to the user.

    Args:
        name: Tool name exposed to the agent.
        description: Tool description exposed to the agent.
        func: The underlying callable, invoked with keyword arguments.
        action_type: Governance action category.
        tool_name: Logical tool identifier recorded with the action.
        risk_level: Risk indicator attached to each action.

    Returns:
        A StructuredTool whose function enforces governance.
    """
    # Local import: this snippet does not import `os` at top level.
    import os

    client = AscendClient(
        api_key=os.environ["ASCEND_API_KEY"],
        agent_id="langchain-agent",
    )

    def governed_func(**kwargs):
        # Check governance before executing the wrapped callable.
        action = AgentAction(
            agent_id="langchain-agent",
            agent_name="LangChain Agent",
            action_type=action_type,
            resource=f"{name}: {str(kwargs)[:100]}",  # truncated for the audit log
            tool_name=tool_name,
            action_details=kwargs,
            risk_indicators={"risk_level": risk_level},
        )

        result = client.submit_action(action)

        # Report pending separately; the original message claimed "denied"
        # for actions that were merely awaiting approval.
        if result.is_pending():
            return (
                f"PENDING: Action awaiting approval "
                f"(action_id={result.action_id})"
            )
        if not result.is_approved():
            return f"ERROR: Action denied by governance - {result.reason}"

        # Execute original function
        return func(**kwargs)

    return StructuredTool.from_function(
        func=governed_func,
        name=name,
        description=description,
    )


# Create tools
sql_tool = create_governed_tool(
    name="sql_query",
    description="Execute SQL queries",
    func=lambda query: db.execute(query).fetchall(),
    action_type="database.query",
    tool_name="postgresql",
)


def _write_file(path: str, content: str) -> int:
    """Write *content* to *path*, closing the handle even on error.

    The original example used ``lambda path, content:
    open(path, 'w').write(content)``, which leaks the file object and can
    lose buffered data if the interpreter never finalizes it.
    """
    with open(path, 'w') as out:
        return out.write(content)


file_tool = create_governed_tool(
    name="write_file",
    description="Write content to a file",
    func=_write_file,
    action_type="file.write",
    tool_name="filesystem",
    risk_level="high",
)

tools = [sql_tool, file_tool]

Agent Callback Handler

Monitor all agent actions through callbacks:

from langchain.callbacks.base import BaseCallbackHandler
from ascend import AscendClient, AgentAction

class AscendCallbackHandler(BaseCallbackHandler):
    """LangChain callback handler for ASCEND governance.

    Submits every tool start for a governance decision and logs tool
    completion/failure against the submitted action for the audit trail.
    """

    def __init__(self, agent_id: str = "langchain-agent"):
        # Local import: this snippet does not import `os` at top level.
        import os
        # Keep the agent id for action payloads; the original hardcoded
        # "langchain-agent" in on_tool_start and ignored this parameter.
        self.agent_id = agent_id
        self.client = AscendClient(
            api_key=os.environ["ASCEND_API_KEY"],
            agent_id=agent_id,
        )
        # Maps tool name -> last submitted action id, so end/error
        # callbacks can close out the corresponding audit record.
        self.action_ids = {}

    def on_tool_start(
        self,
        serialized: dict,
        input_str: str,
        **kwargs,
    ):
        """Called when a tool starts - check governance."""
        tool_name = serialized.get("name", "unknown")

        action = AgentAction(
            agent_id=self.agent_id,
            agent_name="LangChain Agent",
            action_type=f"langchain.{tool_name}",
            resource=f"Tool: {tool_name}",
            tool_name=tool_name,
            action_details={"input": input_str[:500]},  # truncated for audit
        )

        result = self.client.submit_action(action)
        self.action_ids[tool_name] = result.action_id

        # Fail secure: anything short of an approval blocks the tool.
        if not result.is_approved():
            raise PermissionError(f"Tool {tool_name} denied: {result.reason}")

    def on_tool_end(self, output: str, **kwargs):
        """Called when a tool ends - log completion."""
        # NOTE(review): assumes LangChain passes the tool name in kwargs;
        # verify against the installed LangChain version.
        tool_name = kwargs.get("name", "unknown")
        action_id = self.action_ids.get(tool_name)

        if action_id:
            self.client.log_action_completed(action_id, {
                "output_length": len(output),
            })

    def on_tool_error(self, error: Exception, **kwargs):
        """Called on tool error - log failure."""
        tool_name = kwargs.get("name", "unknown")
        action_id = self.action_ids.get(tool_name)

        if action_id:
            self.client.log_action_failed(action_id, {
                "error": str(error),
            })


# Usage
from langchain.agents import AgentExecutor

# Attach the handler so every tool invocation in this executor goes
# through the governance check in on_tool_start.
# NOTE(review): `agent` and `tools` must already be defined (see the
# earlier snippets).
handler = AscendCallbackHandler()
executor = AgentExecutor(
agent=agent,
tools=tools,
callbacks=[handler]
)

Complete Example

#!/usr/bin/env python3
"""
Complete LangChain + ASCEND Integration Example
"""
import os
from typing import Optional
from langchain.agents import AgentExecutor, create_react_agent
from langchain_openai import ChatOpenAI
from langchain.tools import BaseTool
from langchain.prompts import PromptTemplate
from pydantic import Field
from ascend import AscendClient, AgentAction

# Initialize ASCEND client
# Module-level client shared by both governed tools in this example.
# Reads ASCEND_API_KEY from the environment (KeyError if unset).
ascend = AscendClient(
api_key=os.environ["ASCEND_API_KEY"],
agent_id="financial-advisor-agent",
agent_name="Financial Advisor"
)


class GovernedDatabaseTool(BaseTool):
    """Query database with ASCEND governance.

    Always returns a human-readable string (results, denial, or pending
    notice) so the agent can relay the outcome to the user instead of
    crashing mid-conversation.
    """

    # Annotations are required for pydantic v2-based BaseTool; bare
    # `name = ...` class attributes are not registered as model fields.
    name: str = "query_database"
    description: str = "Query the financial database for account information"

    def _run(self, query: str) -> str:
        # Check governance before touching any data.
        action = AgentAction(
            agent_id="financial-advisor-agent",
            agent_name="Financial Advisor",
            action_type="database.query",
            resource=f"Query: {query[:100]}",  # truncated for the audit log
            tool_name="postgresql",
            action_details={"query": query},
            # Flags PII handling so policies can apply stricter rules.
            context={"data_classification": "pii"},
        )

        result = ascend.submit_action(action)

        if result.is_denied():
            return f"ACCESS DENIED: {result.reason}"

        if result.is_pending():
            return "Query requires approval - please wait for authorization"

        # Execute query (mock)
        return f"Results for: {query}"


class GovernedTransferTool(BaseTool):
    """Execute wire transfers with ASCEND governance.

    Transfers are marked critical and flagged for approval, so the normal
    outcome for a new transfer is a pending-approval message rather than
    immediate execution.
    """

    # Annotations are required for pydantic v2-based BaseTool; bare
    # `name = ...` class attributes are not registered as model fields.
    name: str = "wire_transfer"
    description: str = "Execute a wire transfer between accounts"

    def _run(self, details: str) -> str:
        # High-risk action - always check governance.
        action = AgentAction(
            agent_id="financial-advisor-agent",
            agent_name="Financial Advisor",
            action_type="financial.transfer",
            resource=f"Wire transfer: {details[:100]}",  # truncated for audit
            tool_name="banking_api",
            action_details={"details": details},
            risk_indicators={
                "risk_level": "critical",
                "requires_approval": True,  # forces human sign-off
            },
        )

        result = ascend.submit_action(action)

        if not result.is_approved():
            if result.is_pending():
                # Give the agent the action id so the user can track it.
                return (
                    f"Transfer requires manager approval.\n"
                    f"Action ID: {result.action_id}\n"
                    f"Risk Level: {result.risk_level}"
                )
            return f"Transfer DENIED: {result.reason}"

        # Execute transfer (mock)
        return f"Transfer executed successfully. Reference: TXN-{result.action_id}"


# Create agent
# temperature=0 keeps tool selection deterministic for this example.
llm = ChatOpenAI(model="gpt-4", temperature=0)
tools = [GovernedDatabaseTool(), GovernedTransferTool()]

# ReAct prompt: {tools}/{tool_names}/{input}/{agent_scratchpad} are the
# variables create_react_agent expects.
prompt = PromptTemplate.from_template("""
You are a financial advisor assistant. You can:
1. Query the database for account information
2. Execute wire transfers (requires governance approval)

Always verify actions are approved before confirming to the user.

Tools: {tools}
Tool Names: {tool_names}

Question: {input}
Thought: {agent_scratchpad}
""")

agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Run agent
result = executor.invoke({
"input": "What is the balance of account 12345?"
})
print(result["output"])

Best Practices

  1. Classify all tools by risk level
  2. Include context like user ID, session
  3. Handle pending approvals gracefully
  4. Log completions for audit trail
  5. Fail secure on governance errors

Next Steps


Document Version: 1.0.0 | Last Updated: December 2025