LangChain Integration
Automatic instrumentation for LangChain chains, agents, and tools with zero code changes.
Supports langchain >= 0.1.0 — Chains, Agents, Tools
Installation
Terminal
pip install turingpulse langchain langchain-openai

Quick Start
1. Initialize & Auto-Instrument
setup.py
from turingpulse import init
from turingpulse.integrations.langchain import auto_instrument
# Initialize TuringPulse
init(
api_key="sk_live_your_api_key",
project_id="my-project",
)
# Enable auto-instrumentation for all LangChain components
auto_instrument()

2. Use LangChain Normally
chain.py
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# Create a chain - it's automatically traced
llm = ChatOpenAI(model="gpt-4")
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | llm | StrOutputParser()
# Run the chain - traces are captured automatically
result = chain.invoke({"topic": "programming"})
print(result)

ℹ️
Zero Code Changes
Once auto-instrumentation is enabled, all LangChain components are automatically traced. No decorators or wrappers needed.
What Gets Traced
- LLM Calls — Model, tokens, latency, cost
- Chain Executions — Full chain with all steps
- Agent Loops — Reasoning steps and tool calls
- Tool Invocations — Input, output, timing
- Retrievers — Documents retrieved
- Memory Operations — Chat history access
- Embeddings — Batch sizes, dimensions
- Errors — Exceptions with full context
Manual Instrumentation
manual.py
from turingpulse.integrations.langchain import instrument_chain
# Create your chain
chain = prompt | llm | parser
# Wrap with instrumentation
instrumented_chain = instrument_chain(
chain,
agent_id="my-chain",
labels={"team": "support", "version": "v2"},
)
# Use the instrumented chain
result = instrumented_chain.invoke({"input": "Hello"})

With Governance
governance.py
from turingpulse import GovernanceDirective
from turingpulse.integrations.langchain import instrument_agent
instrumented_agent = instrument_agent(
agent_executor,
agent_id="financial-agent",
governance=GovernanceDirective(
hitl=True,
hitl_condition=lambda ctx: ctx.tool_name in ["transfer_funds", "delete_record"],
reviewers=["compliance@company.com"],
hatl=True,
hatl_sample_rate=0.2,
),
)

With KPIs
kpis.py
from turingpulse import KPIConfig
from turingpulse.integrations.langchain import instrument_chain
instrumented_chain = instrument_chain(
chain,
agent_id="document-qa",
kpis=[
KPIConfig(
kpi_id="latency_ms",
use_duration=True,
alert_threshold=8000,
comparator="gt",
),
KPIConfig(
kpi_id="total_tokens",
value=lambda ctx: ctx.metadata.get("total_tokens", 0),
alert_threshold=10000,
comparator="gt",
),
KPIConfig(
kpi_id="cost_usd",
value=lambda ctx: ctx.metadata.get("total_cost", 0),
alert_threshold=0.25,
comparator="gt",
),
],
)

Streaming Support
streaming.py
# Streaming is fully supported
for chunk in instrumented_chain.stream({"input": "Hello"}):
print(chunk, end="", flush=True)
# Async streaming
async for chunk in instrumented_chain.astream({"input": "Hello"}):
    print(chunk, end="", flush=True)

Callback Handler
callback.py
from turingpulse.integrations.langchain import TuringPulseCallbackHandler
# Create the callback handler
callback = TuringPulseCallbackHandler(
agent_id="my-chain",
labels={"version": "v1"},
)
# Pass to any LangChain component
result = chain.invoke(
{"input": "Hello"},
config={"callbacks": [callback]},
)