diff --git a/pipeline/preprocessors/link_map.py b/pipeline/preprocessors/link_map.py
index 7b8df97363..73e7ca81aa 100644
--- a/pipeline/preprocessors/link_map.py
+++ b/pipeline/preprocessors/link_map.py
@@ -333,6 +333,7 @@ class LinkMap(TypedDict):
"BaseMessage": "classes/_langchain_core.messages.BaseMessage.html",
"HumanMessage": "classes/_langchain_core.messages.HumanMessage.html",
"SystemMessage": "classes/_langchain_core.messages.SystemMessage.html",
+ "SystemMessage.concat": "classes/_langchain_core.messages.SystemMessage.html#concat",
"ToolMessage": "classes/_langchain_core.messages.ToolMessage.html",
"ToolCallChunk": "classes/_langchain_core.messages.ToolCallChunk.html",
"BaseChatModel": "classes/_langchain_core.language_models_chat_models.BaseChatModel.html",
@@ -350,8 +351,8 @@ class LinkMap(TypedDict):
"Document": "classes/_langchain_core.documents.Document.html",
"Embeddings": "classes/_langchain_core.embeddings.Embeddings.html",
"initChatModel": "functions/langchain.chat_models_universal.initChatModel.html",
- "RunnableConfig": "interfaces/_langchain_core.runnables.RunnableConfig.html",
"Runnable": "classes/_langchain_core.runnables.Runnable.html",
+ "RunnableConfig": "interfaces/_langchain_core.runnables.RunnableConfig.html",
"Retrievers": "interfaces/_langchain_core.retrievers.BaseRetriever.html",
"VectorStore": "classes/_langchain_core.vectorstores.VectorStore.html",
"VectorStore.maxMarginalRelevanceSearch": "classes/_langchain_core.vectorstores.VectorStore.html#maxMarginalRelevanceSearch",
@@ -376,7 +377,9 @@ class LinkMap(TypedDict):
"BaseCheckpointSaver": "classes/_langchain_langgraph-checkpoint.BaseCheckpointSaver.html",
"BaseStore": "classes/_langchain_langgraph-checkpoint.BaseStore.html",
"BaseStore.put": "classes/_langchain_langgraph-checkpoint.BaseStore.html#put",
+ "InMemorySaver": "classes/_langchain_langgraph-checkpoint.MemorySaver.html",
"MemorySaver": "classes/_langchain_langgraph-checkpoint.MemorySaver.html",
+ "AsyncPostgresSaver": "classes/_langchain_langgraph-checkpoint-postgres.AsyncPostgresSaver.html",
"PostgresSaver": "classes/_langchain_langgraph-checkpoint-postgres.index.PostgresSaver.html",
"protocol": "interfaces/_langchain_langgraph-checkpoint.SerializerProtocol.html",
"SerializerProtocol": "interfaces/_langchain_langgraph-checkpoint.SerializerProtocol.html",
@@ -390,24 +393,22 @@ class LinkMap(TypedDict):
"addEdge": "classes/_langchain_langgraph.index.StateGraph.html#addEdge",
"add_node": "classes/_langchain_langgraph.index.StateGraph.html#addNode",
"add_messages": "functions/_langchain_langgraph.index.messagesStateReducer.html",
+ "LastValue": "classes/_langchain_langgraph.channels.LastValue.html",
+ "Topic": "classes/_langchain_langgraph.channels.Topic.html",
"BinaryOperatorAggregate": "classes/_langchain_langgraph.index.BinaryOperatorAggregate.html",
"Command": "classes/_langchain_langgraph.index.Command.html",
"CompiledStateGraph": "classes/_langchain_langgraph.index.CompiledStateGraph.html",
- "create_agent": "functions/langchain.index.createAgent.html",
- "create_react_agent": "functions/_langchain_langgraph.prebuilt.createReactAgent.html",
- "create_supervisor": "functions/_langchain_langgraph-supervisor.createSupervisor.html",
+ "createAgent": "functions/langchain.index.createAgent.html",
+ "createReactAgent": "functions/_langchain_langgraph.prebuilt.createReactAgent.html",
+ "createSupervisor": "functions/_langchain_langgraph-supervisor.createSupervisor.html",
"entrypoint": "functions/_langchain_langgraph.index.entrypoint.html",
"entrypoint.final": "functions/_langchain_langgraph.index.entrypoint.html#final",
"get_state_history": "classes/_langchain_langgraph.pregel.Pregel.html#getStateHistory",
"getStateHistory": "classes/_langchain_langgraph.pregel.Pregel.html#getStateHistory",
"HumanInterrupt": "interfaces/_langchain_langgraph.prebuilt.HumanInterrupt.html",
"interrupt": "functions/_langchain_langgraph.index.interrupt.html",
- "InMemorySaver": "classes/_langchain_langgraph-checkpoint.MemorySaver.html",
- "AsyncPostgresSaver": "classes/_langchain_langgraph-checkpoint-postgres.index.PostgresSaver.html",
"CompiledStateGraph.invoke": "classes/_langchain_langgraph.index.CompiledStateGraph.html#invoke",
"langgraph.json": "cloud/reference/cli/#configuration-file",
- "LastValue": "classes/_langchain_langgraph.channels.LastValue.html",
- "Topic": "classes/_langchain_langgraph.channels.Topic.html",
"messagesStateReducer": "functions/_langchain_langgraph.index.messagesStateReducer.html",
"Pregel": "classes/_langchain_langgraph.pregel.Pregel.html",
"Pregel.stream": "classes/_langchain_langgraph.pregel.Pregel.html#stream",
@@ -435,6 +436,8 @@ class LinkMap(TypedDict):
"ContextEdit": "interfaces/langchain.index.ContextEdit.html",
"toolRetryMiddleware": "functions/langchain.index.toolRetryMiddleware.html",
"modelRetryMiddleware": "functions/langchain.index.modelRetryMiddleware.html",
+ "systemPrompt": "types/langchain.index.CreateAgentParams.html#systemprompt",
+ "openAIModerationMiddleware": "classes/_langchain_openai.middleware.OpenAIModerationMiddleware.html",
},
},
]
diff --git a/src/docs.json b/src/docs.json
index f55bcbbffe..bf88a0dbbb 100644
--- a/src/docs.json
+++ b/src/docs.json
@@ -146,9 +146,10 @@
"pages": [
"oss/python/langchain/overview",
{
- "group": "LangChain v1.0",
+ "group": "Releases",
"pages": [
"oss/python/releases/langchain-v1",
+ "oss/python/releases/changelog",
"oss/python/migrate/langchain-v1"
]
},
@@ -417,6 +418,7 @@
{
"group": "Releases",
"pages": [
+ "oss/python/releases/changelog",
"oss/python/releases/langchain-v1"
]
},
@@ -487,9 +489,10 @@
"pages": [
"oss/javascript/langchain/overview",
{
- "group": "LangChain v1.0",
+ "group": "Releases",
"pages": [
"oss/javascript/releases/langchain-v1",
+ "oss/javascript/releases/changelog",
"oss/javascript/migrate/langchain-v1"
]
},
@@ -792,6 +795,7 @@
{
"group": "Releases",
"pages": [
+ "oss/javascript/releases/changelog",
"oss/javascript/releases/langchain-v1"
]
},
diff --git a/src/langsmith/rate-limiting.mdx b/src/langsmith/rate-limiting.mdx
index 35691704fa..0e2e9d7fff 100644
--- a/src/langsmith/rate-limiting.mdx
+++ b/src/langsmith/rate-limiting.mdx
@@ -30,7 +30,7 @@ def evaluator(inputs: dict, outputs: dict, reference_outputs: dict) -> dict:
...
```
-See the [`langchain`](oss/langchain/models#rate-limiting) documentation for more on how to configure rate limiters.
+See the [`langchain`](/oss/langchain/models#rate-limiting) documentation for more on how to configure rate limiters.
## Retrying with exponential backoff
diff --git a/src/oss/javascript/releases/changelog.mdx b/src/oss/javascript/releases/changelog.mdx
new file mode 100644
index 0000000000..3a1a061423
--- /dev/null
+++ b/src/oss/javascript/releases/changelog.mdx
@@ -0,0 +1,26 @@
+---
+title: Changelog
+sidebarTitle: Changelog
+---
+
+A chronological log of updates and improvements to LangChain JavaScript.
+
+## v1.1 (11/25/2025)
+
+* [Model profiles](/oss/langchain/models#model-profiles): Chat models now expose supported features and capabilities through a `.profile` getter. These data are derived from [models.dev](https://github.com/sst/models.dev), an open source project providing model capability data.
+* [Summarization middleware](/oss/langchain/middleware/built-in#summarization): Updated to support flexible trigger points using model profiles for context-aware summarization.
+* [Structured output](/oss/langchain/structured-output): `ProviderStrategy` support (native structured output) can now be inferred from model profiles.
+* [`SystemMessage` for `createAgent`](/oss/langchain/middleware/custom#working-with-system-messages): Support for passing `SystemMessage` instances directly to `createAgent`'s `systemPrompt` parameter and a new `concat` method for extending system messages. Enables advanced features like cache control and structured content blocks.
+* [Dynamic system prompt middleware](/oss/langchain/agents#dynamic-system-prompt): Return values from `dynamicSystemPromptMiddleware` are now purely additive. When returning a @[`SystemMessage`] or `string`, they are merged with existing system messages rather than replacing them, making it easier to compose multiple middleware that modify the prompt.
+* [Model retry middleware](/oss/langchain/middleware/built-in#model-retry): New middleware for automatically retrying failed model calls with configurable exponential backoff, improving agent reliability.
+* [Content moderation middleware](/oss/langchain/middleware/built-in#content-moderation): OpenAI content moderation middleware for detecting and handling unsafe content in agent interactions. Supports checking user input, model output, and tool results.
+* __Compatibility improvements:__ Fixed error handling for Zod v4 validation errors in structured output and tool schemas, ensuring detailed error messages are properly displayed.
+
+### Resources
+
+- [V1 Migration guide](/oss/migrate/langchain-v1) - How to migrate to LangChain v1
+- [V1 Release notes](/oss/releases/langchain-v1) - Detailed release notes
+- [Release policy](/oss/release-policy) - Detailed release policies
+- [Versioning](/oss/versioning) - Understanding version numbers
+- [Report issues on GitHub](https://github.com/langchain-ai/langchainjs/issues)
+
diff --git a/src/oss/javascript/releases/langchain-v1.mdx b/src/oss/javascript/releases/langchain-v1.mdx
index fc756d9341..7adb26d2f5 100644
--- a/src/oss/javascript/releases/langchain-v1.mdx
+++ b/src/oss/javascript/releases/langchain-v1.mdx
@@ -1,6 +1,6 @@
---
title: What's new in v1
-sidebarTitle: Release notes
+sidebarTitle: V1 Release Notes
---
**LangChain v1 is a focused, production-ready foundation for building agents.** We've streamlined the framework around three core improvements:
@@ -310,7 +310,7 @@ Please report any issues discovered with 1.0 on [GitHub](https://github.com/lang
Read the announcement
-
+
Deep dive into middleware
diff --git a/src/oss/langchain/middleware/built-in.mdx b/src/oss/langchain/middleware/built-in.mdx
index 8917c228fd..346230ff7c 100644
--- a/src/oss/langchain/middleware/built-in.mdx
+++ b/src/oss/langchain/middleware/built-in.mdx
@@ -407,6 +407,18 @@ const agent = createAgent({
For complete examples, configuration options, and integration patterns, see the [Human-in-the-loop documentation](/oss/langchain/human-in-the-loop).
+:::python
+
+ Watch this [video guide](https://www.youtube.com/watch?v=SpfT6-YAVPk) demonstrating Human-in-the-loop middleware behavior.
+
+:::
+
+:::js
+
+ Watch this [video guide](https://www.youtube.com/watch?v=tdOeUVERukA) demonstrating Human-in-the-loop middleware behavior.
+
+:::
+
### Model call limit
Limit the number of model calls to prevent infinite loops or excessive costs. Model call limit is useful for the following:
@@ -454,6 +466,18 @@ const agent = createAgent({
```
:::
+:::python
+
+ Watch this [video guide](https://www.youtube.com/watch?v=nJEER0uaNkE) demonstrating Model Call Limit middleware behavior.
+
+:::
+
+:::js
+
+ Watch this [video guide](https://www.youtube.com/watch?v=x5jLQTFXR0Y) demonstrating Model Call Limit middleware behavior.
+
+:::
+
:::python
@@ -539,6 +563,18 @@ const agent = createAgent({
```
:::
+:::python
+
+ Watch this [video guide](https://www.youtube.com/watch?v=6gYlaJJ8t0w) demonstrating Tool Call Limit middleware behavior.
+
+:::
+
+:::js
+
+ Watch this [video guide](https://www.youtube.com/watch?v=oL6am5UqODY) demonstrating Tool Call Limit middleware behavior.
+
+:::
+
:::python
@@ -686,6 +722,12 @@ const agent = createAgent({
```
:::
+:::python
+
+ Watch this [video guide](https://www.youtube.com/watch?v=8rCRO0DUeIM) demonstrating Model Fallback middleware behavior.
+
+:::
+
:::python
@@ -1051,6 +1093,18 @@ const agent = createAgent({
```
:::
+:::python
+
+ Watch this [video guide](https://www.youtube.com/watch?v=yTWocbVKQxw) demonstrating To-do List middleware behavior.
+
+:::
+
+:::js
+
+ Watch this [video guide](https://www.youtube.com/watch?v=dwvhZ1z_Pas) demonstrating To-do List middleware behavior.
+
+:::
+
:::python
@@ -2876,6 +2930,28 @@ agent = create_agent(
)
```
:::
+:::js
+**API reference:** @[`openAIModerationMiddleware`]
+
+```typescript
+import { createAgent, openAIModerationMiddleware } from "langchain";
+import { ChatOpenAI } from "@langchain/openai";
+
+const model = new ChatOpenAI({ model: "gpt-4o" });
+
+const agent = createAgent({
+ model,
+ tools: [searchTool, customerDataTool],
+ middleware: [openAIModerationMiddleware({
+ model,
+ moderationModel: "omni-moderation-latest",
+ checkInput: true,
+ checkOutput: true,
+ exitBehavior: "end",
+ })],
+});
+```
+:::
@@ -2923,12 +2999,52 @@ agent = create_agent(
:::
+:::js
+
+ OpenAI model to use.
+
+
+
+ OpenAI moderation model to use. Options: `'omni-moderation-latest'`, `'omni-moderation-2024-09-26'`, `'text-moderation-latest'`, `'text-moderation-stable'`
+
+
+
+ Whether to check user input messages before the model is called
+
+
+
+ Whether to check model output messages after the model is called
+
+
+
+ Whether to check tool result messages before the model is called
+
+
+
+ How to handle violations when content is flagged. Options:
+
+ - `'end'` - End agent execution immediately with a violation message
+ - `'error'` - Throw `OpenAIModerationError` exception
+ - `'replace'` - Replace the flagged content with the violation message and continue
+
+
+
+ Custom template for violation messages. Supports template variables:
+
+ - `{categories}` - Comma-separated list of flagged categories
+ - `{category_scores}` - JSON string of category scores
+ - `{original_content}` - The original flagged content
+
+ Default: `"I'm sorry, but I can't comply with that request. It was flagged for {categories}."`
+
+:::
The middleware integrates OpenAI's moderation endpoint to check content at different stages:
+:::python
**Moderation stages:**
- `check_input` - User messages before model call
- `check_output` - AI messages after model call
@@ -2939,7 +3055,6 @@ The middleware integrates OpenAI's moderation endpoint to check content at diffe
- `'error'` - Raise exception for application handling
- `'replace'` - Replace flagged content and continue
-:::python
```python
from langchain_openai import ChatOpenAI
from langchain_openai.middleware import OpenAIModerationMiddleware
@@ -2992,5 +3107,70 @@ agent_replace = create_agent(
)
```
:::
+:::js
+**Moderation stages:**
+- `checkInput` - User messages before model call
+- `checkOutput` - AI messages after model call
+- `checkToolResults` - Tool outputs before model call
+**Exit behaviors:**
+- `'end'` (default) - Stop execution with violation message
+- `'error'` - Throw exception for application handling
+- `'replace'` - Replace flagged content and continue
+
+```typescript
+import { createAgent, openAIModerationMiddleware } from "langchain";
+import { ChatOpenAI } from "@langchain/openai";
+
+const model = new ChatOpenAI({ model: "gpt-4o" });
+
+// Basic moderation
+const agent = createAgent({
+ model,
+ tools: [searchTool, customerDataTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model,
+ moderationModel: "omni-moderation-latest",
+ checkInput: true,
+ checkOutput: true,
+ }),
+ ],
+});
+
+// Strict moderation with custom message
+const agentStrict = createAgent({
+ model,
+ tools: [searchTool, customerDataTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model,
+ moderationModel: "omni-moderation-latest",
+ checkInput: true,
+ checkOutput: true,
+ checkToolResults: true,
+ exitBehavior: "error",
+ violationMessage:
+ "Content policy violation detected: {categories}. " +
+ "Please rephrase your request.",
+ }),
+ ],
+});
+
+// Moderation with replacement behavior
+const agentReplace = createAgent({
+ model,
+ tools: [searchTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model,
+ checkInput: true,
+ exitBehavior: "replace",
+ violationMessage: "[Content removed due to safety policies]",
+ }),
+ ],
+});
+```
+
+:::
diff --git a/src/oss/langchain/middleware/custom.mdx b/src/oss/langchain/middleware/custom.mdx
index 4d89ed40cf..450f2c1964 100644
--- a/src/oss/langchain/middleware/custom.mdx
+++ b/src/oss/langchain/middleware/custom.mdx
@@ -365,7 +365,12 @@ const loggingMiddleware = createMiddleware({
## Custom state schema
-Middleware can extend the agent's state with custom properties.
+Middleware can extend the agent's state with custom properties. This enables middleware to:
+
+- **Track state across execution**: Maintain counters, flags, or other values that persist throughout the agent's execution lifecycle
+- **Share data between hooks**: Pass information from `before_model` to `after_model` or between different middleware instances
+- **Implement cross-cutting concerns**: Add functionality like rate limiting, usage tracking, user context, or audit logging without modifying the core agent logic
+- **Make conditional decisions**: Use accumulated state to determine whether to continue execution, jump to different nodes, or modify behavior dynamically
:::python
@@ -498,6 +503,219 @@ const result = await agent.invoke({
});
```
+State fields can be either public or private. Fields that start with an underscore (`_`) are considered private and will not be included in the agent's result. Only public fields (those without a leading underscore) are returned.
+
+This is useful for storing internal middleware state that shouldn't be exposed to the caller, such as temporary tracking variables or internal flags:
+
+```typescript
+const middleware = createMiddleware({
+ name: "ExampleMiddleware",
+ stateSchema: z.object({
+ // Public field - included in invoke result
+ publicCounter: z.number().default(0),
+ // Private field - excluded from invoke result
+ _internalFlag: z.boolean().default(false),
+ }),
+ afterModel: (state) => {
+ // Both fields are accessible during execution
+ if (state._internalFlag) {
+ return { publicCounter: state.publicCounter + 1 };
+ }
+ return { _internalFlag: true };
+ },
+});
+
+const result = await agent.invoke({
+ messages: [new HumanMessage("Hello")],
+ publicCounter: 0
+});
+
+// result only contains publicCounter, not _internalFlag
+console.log(result.publicCounter); // 1
+console.log(result._internalFlag); // undefined
+```
+
+:::
+
+## Custom context
+
+Middleware can define a custom context schema to access per-invocation metadata. Unlike state, context is read-only and not persisted between invocations. This makes it ideal for:
+
+- **User information**: Pass user ID, roles, or preferences that don't change during execution
+- **Configuration overrides**: Provide per-invocation settings like rate limits or feature flags
+- **Tenant/workspace context**: Include organization-specific data for multi-tenant applications
+- **Request metadata**: Pass request IDs, API keys, or other metadata needed by middleware
+
+:::python
+
+
+
+
+```python
+from dataclasses import dataclass
+from langchain.agents import create_agent
+from langchain.messages import HumanMessage, SystemMessage
+from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse
+from typing import Callable
+
+
+@dataclass
+class Context:
+ user_id: str
+ tenant_id: str
+ api_key: str | None = None
+
+
+@wrap_model_call
+def add_user_context(
+ request: ModelRequest,
+ handler: Callable[[ModelRequest], ModelResponse],
+) -> ModelResponse:
+ # Access context from runtime
+ user_id = request.runtime.context.user_id
+ tenant_id = request.runtime.context.tenant_id
+
+ # Add user context to system message
+ new_content = list(request.system_message.content_blocks) + [
+ {"type": "text", "text": f"User ID: {user_id}, Tenant: {tenant_id}"}
+ ]
+ new_system_message = SystemMessage(content=new_content)
+
+ return handler(request.override(system_message=new_system_message))
+
+
+agent = create_agent(
+ model="gpt-4o",
+ middleware=[add_user_context],
+ tools=[],
+ context_schema=Context,
+)
+
+# Invoke with context
+result = agent.invoke(
+ {"messages": [HumanMessage("Hello")]},
+ context=Context(user_id="user-123", tenant_id="acme-corp", api_key="sk-...")
+)
+```
+
+
+
+
+
+```python
+from dataclasses import dataclass
+from langchain.agents import create_agent
+from langchain.messages import HumanMessage, SystemMessage
+from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse
+from typing import Callable
+
+
+@dataclass
+class Context:
+ user_id: str
+ tenant_id: str
+ api_key: str | None = None
+
+
+class UserContextMiddleware(AgentMiddleware):
+ def wrap_model_call(
+ self,
+ request: ModelRequest,
+ handler: Callable[[ModelRequest], ModelResponse],
+ ) -> ModelResponse:
+ # Access context from runtime
+ user_id = request.runtime.context.user_id
+ tenant_id = request.runtime.context.tenant_id
+
+ # Add user context to system message
+ new_content = list(request.system_message.content_blocks) + [
+ {"type": "text", "text": f"User ID: {user_id}, Tenant: {tenant_id}"}
+ ]
+ new_system_message = SystemMessage(content=new_content)
+
+ return handler(request.override(system_message=new_system_message))
+
+
+agent = create_agent(
+ model="gpt-4o",
+ middleware=[UserContextMiddleware()],
+ tools=[],
+ context_schema=Context,
+)
+
+# Invoke with context
+result = agent.invoke(
+ {"messages": [HumanMessage("Hello")]},
+ context=Context(user_id="user-123", tenant_id="acme-corp", api_key="sk-...")
+)
+```
+
+
+
+
+:::
+
+:::js
+
+Define a context schema using Zod and access it via `runtime.context` in middleware hooks. Required fields in the context schema will be enforced at the TypeScript level, ensuring you must provide them when calling `agent.invoke()`.
+
+```typescript
+import { createAgent, createMiddleware, HumanMessage } from "langchain";
+import * as z from "zod";
+
+const contextSchema = z.object({
+ userId: z.string(),
+ tenantId: z.string(),
+ apiKey: z.string().optional(),
+});
+
+const userContextMiddleware = createMiddleware({
+ name: "UserContextMiddleware",
+ contextSchema,
+ wrapModelCall: (request, handler) => {
+ // Access context from runtime
+ const { userId, tenantId } = request.runtime.context;
+
+ // Add user context to system message
+ const contextText = `User ID: ${userId}, Tenant: ${tenantId}`;
+ const newSystemMessage = request.systemMessage.concat(contextText);
+
+ return handler({
+ ...request,
+ systemMessage: newSystemMessage,
+ });
+ },
+});
+
+const agent = createAgent({
+ model: "gpt-4o",
+ middleware: [userContextMiddleware],
+ tools: [],
+ contextSchema,
+});
+
+const result = await agent.invoke(
+ { messages: [new HumanMessage("Hello")] },
+ // Required fields (userId, tenantId) must be provided
+ {
+ context: {
+ userId: "user-123",
+ tenantId: "acme-corp",
+ },
+ }
+);
+```
+
+**Required context fields**: When you define required fields in your `contextSchema` (fields without `.optional()` or `.default()`), TypeScript will enforce that these fields must be provided during `agent.invoke()` calls. This ensures type safety and prevents runtime errors from missing required context.
+
+```typescript
+// This will cause a TypeScript error if userId or tenantId are missing
+const result = await agent.invoke(
+ { messages: [new HumanMessage("Hello")] },
+ { context: { userId: "user-123" } } // Error: tenantId is required
+);
+```
+
:::
## Execution order
@@ -623,23 +841,39 @@ class BlockedContentMiddleware(AgentMiddleware):
:::js
```typescript
-import { createMiddleware, AIMessage } from "langchain";
+import { createAgent, createMiddleware, AIMessage } from "langchain";
-const blockedContentMiddleware = createMiddleware({
- name: "BlockedContentMiddleware",
- afterModel: (state) => {
- const lastMessage = state.messages[state.messages.length - 1];
- if (lastMessage.content.includes("BLOCKED")) {
- return {
- messages: [new AIMessage("I cannot respond to that request.")],
- jumpTo: "end",
- };
- }
- return;
- },
+const agent = createAgent({
+ model: "gpt-4o",
+ middleware: [
+ createMiddleware({
+ name: "BlockedContentMiddleware",
+ beforeModel: {
+ canJumpTo: ["end"],
+ hook: (state) => {
+ if (state.messages.at(-1)?.content.includes("BLOCKED")) {
+ return {
+ messages: [new AIMessage("I cannot respond to that request.")],
+ jumpTo: "end" as const,
+ };
+ }
+ return;
+ },
+ },
+ }),
+ ],
});
-```
+const result = await agent.invoke({
+ messages: "Hello, world! BLOCKED"
+});
+
+/**
+ * Expected output:
+ * I cannot respond to that request.
+ */
+console.log(result.messages.at(-1)?.content);
+```
:::
## Best practices
@@ -919,6 +1153,201 @@ const agent = createAgent({
:::
+### Working with system messages
+
+:::python
+
+Modify system messages in middleware using the `system_message` field on `ModelRequest`. The `system_message` field contains a @[`SystemMessage`] object (even if the agent was created with a string `system_prompt`).
+
+**Example: Adding context to system message**
+
+
+
+
+```python
+from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse
+from langchain.messages import SystemMessage
+from typing import Callable
+
+
+@wrap_model_call
+def add_context(
+ request: ModelRequest,
+ handler: Callable[[ModelRequest], ModelResponse],
+) -> ModelResponse:
+ # Always work with content blocks
+ new_content = list(request.system_message.content_blocks) + [
+ {"type": "text", "text": "Additional context."}
+ ]
+ new_system_message = SystemMessage(content=new_content)
+ return handler(request.override(system_message=new_system_message))
+```
+
+
+
+
+
+```python
+from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse
+from langchain.messages import SystemMessage
+from typing import Callable
+
+
+class ContextMiddleware(AgentMiddleware):
+ def wrap_model_call(
+ self,
+ request: ModelRequest,
+ handler: Callable[[ModelRequest], ModelResponse],
+ ) -> ModelResponse:
+ # Always work with content blocks
+ new_content = list(request.system_message.content_blocks) + [
+ {"type": "text", "text": "Additional context."}
+ ]
+ new_system_message = SystemMessage(content=new_content)
+ return handler(request.override(system_message=new_system_message))
+```
+
+
+
+
+**Example: Working with cache control (Anthropic)**
+
+When working with Anthropic models, you can use structured content blocks with cache control directives to cache large system prompts:
+
+
+
+
+```python
+from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse
+from langchain.messages import SystemMessage
+from typing import Callable
+
+
+@wrap_model_call
+def add_cached_context(
+ request: ModelRequest,
+ handler: Callable[[ModelRequest], ModelResponse],
+) -> ModelResponse:
+ # Always work with content blocks
+ new_content = list(request.system_message.content_blocks) + [
+ {
+ "type": "text",
+ "text": "Here is a large document to analyze:\n\n...",
+ # content up until this point is cached
+ "cache_control": {"type": "ephemeral"}
+ }
+ ]
+
+ new_system_message = SystemMessage(content=new_content)
+ return handler(request.override(system_message=new_system_message))
+```
+
+
+
+
+
+```python
+from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse
+from langchain.messages import SystemMessage
+from typing import Callable
+
+
+class CachedContextMiddleware(AgentMiddleware):
+ def wrap_model_call(
+ self,
+ request: ModelRequest,
+ handler: Callable[[ModelRequest], ModelResponse],
+ ) -> ModelResponse:
+ # Always work with content blocks
+ new_content = list(request.system_message.content_blocks) + [
+ {
+ "type": "text",
+ "text": "Here is a large document to analyze:\n\n...",
+ "cache_control": {"type": "ephemeral"} # This content will be cached
+ }
+ ]
+
+ new_system_message = SystemMessage(content=new_content)
+ return handler(request.override(system_message=new_system_message))
+```
+
+
+
+
+**Notes:**
+- `ModelRequest.system_message` is always a @[`SystemMessage`] object, even if the agent was created with `system_prompt="string"`
+- Use `SystemMessage.content_blocks` to access content as a list of blocks, regardless of whether the original content was a string or list
+- When modifying system messages, use `content_blocks` and append new blocks to preserve existing structure
+- You can pass @[`SystemMessage`] objects directly to `create_agent`'s `system_prompt` parameter for advanced use cases like cache control
+
+:::
+
+:::js
+Modify system messages in middleware using the `systemMessage` field in `ModelRequest`. It contains a @[`SystemMessage`] object (even if the agent was created with a string @[`systemPrompt`]).
+
+**Example: Chaining middleware** - Different middleware can use different approaches:
+
+```typescript
+import { createMiddleware, SystemMessage, createAgent } from "langchain";
+
+// Middleware 1: Uses systemMessage with simple concatenation
+const myMiddleware = createMiddleware({
+ name: "MyMiddleware",
+ wrapModelCall: async (request, handler) => {
+ return handler({
+ ...request,
+ systemMessage: request.systemMessage.concat(`Additional context.`),
+ });
+ },
+});
+
+// Middleware 2: Uses systemMessage with structured content (preserves structure)
+const myOtherMiddleware = createMiddleware({
+ name: "MyOtherMiddleware",
+ wrapModelCall: async (request, handler) => {
+ return handler({
+ ...request,
+ systemMessage: request.systemMessage.concat(
+ new SystemMessage({
+ content: [
+ {
+ type: "text",
+ text: " More additional context. This will be cached.",
+ cache_control: { type: "ephemeral", ttl: "5m" },
+ },
+ ],
+ })
+ ),
+ });
+ },
+});
+
+const agent = createAgent({
+ model: "anthropic:claude-3-5-sonnet",
+ systemPrompt: "You are a helpful assistant.",
+ middleware: [myMiddleware, myOtherMiddleware],
+});
+```
+
+The resulting system message will be:
+```typescript
+new SystemMessage({
+ content: [
+ { type: "text", text: "You are a helpful assistant." },
+ { type: "text", text: "Additional context." },
+ {
+ type: "text",
+ text: " More additional context. This will be cached.",
+ cache_control: { type: "ephemeral", ttl: "5m" },
+ },
+ ],
+});
+```
+
+Use @[`SystemMessage.concat`] to preserve cache control metadata or structured content blocks created by other middleware.
+
+:::
+
## Additional resources
- [Middleware API reference](https://reference.langchain.com/python/langchain/middleware/)
diff --git a/src/oss/langchain/models.mdx b/src/oss/langchain/models.mdx
index d51f18f8a5..d8d5e85393 100644
--- a/src/oss/langchain/models.mdx
+++ b/src/oss/langchain/models.mdx
@@ -46,7 +46,7 @@ response = model.invoke("Why do parrots talk?")
See @[`init_chat_model`][init_chat_model] for more detail, including information on how to pass model [parameters](#parameters).
:::
:::js
-The easiest way to get started with a standalone model in LangChain is to use `initChatModel` to initialize one from a [provider](/oss/integrations/providers/overview) of your choice (examples below):
+The easiest way to get started with a standalone model in LangChain is to use `initChatModel` to initialize one from a [chat model provider](/oss/integrations/chat) of your choice (examples below):
```typescript
@@ -1249,11 +1249,11 @@ to avoid mutating shared state.
**Option 2 (fix data upstream)**
-The primary source for the data is the [models.dev](https://models.dev/) project. These data are merged with additional fields and overrides in LangChain [integration packages](/oss/python/integrations/providers/overview) and are shipped with those packages.
+The primary source for the data is the [models.dev](https://models.dev/) project. These data are merged with additional fields and overrides in LangChain [integration packages](/oss/integrations/providers/overview) and are shipped with those packages.
Model profile data can be updated through the following process:
1. (If needed) update the source data at [models.dev](https://models.dev/) through a pull request to its [repository on Github](https://github.com/sst/models.dev).
-2. (If needed) update additional fields and overrides in `langchain_/data/profile_augmentations.toml` through a pull request to the LangChain [integration package](/oss/python/integrations/providers/overview)`.
+2. (If needed) update additional fields and overrides in `langchain_/data/profile_augmentations.toml` through a pull request to the LangChain [integration package](/oss/integrations/providers/overview).
3. Use the [langchain-model-profiles](https://pypi.org/project/langchain-model-profiles/) CLI tool to pull the latest data from [models.dev](https://models.dev/), merge in the augmentations and update the profile data:
```bash
@@ -1313,11 +1313,11 @@ const model = initChatModel("...", { profile: customProfile });
**Option 2 (fix data upstream)**
-The primary source for the data is the [models.dev](https://models.dev/) project. These data are merged with additional fields and overrides in LangChain [integration packages](/oss/javascript/integrations/providers/overview) and are shipped with those packages.
+The primary source for the data is the [models.dev](https://models.dev/) project. These data are merged with additional fields and overrides in LangChain [integration packages](/oss/integrations/providers/overview) and are shipped with those packages.
Model profile data can be updated through the following process:
1. (If needed) update the source data at [models.dev](https://models.dev/) through a pull request to its [repository on Github](https://github.com/sst/models.dev).
-2. (If needed) update additional fields and overrides in `langchain-/profiles.toml` through a pull request to the LangChain [integration package](/oss/javascript/integrations/providers/overview).
+2. (If needed) update additional fields and overrides in `langchain-/profiles.toml` through a pull request to the LangChain [integration package](/oss/integrations/providers/overview).
:::
diff --git a/src/oss/langchain/observability.mdx b/src/oss/langchain/observability.mdx
index 61d4778bf5..ba7cf26fd7 100644
--- a/src/oss/langchain/observability.mdx
+++ b/src/oss/langchain/observability.mdx
@@ -5,8 +5,17 @@ sidebarTitle: Observability
import observability from '/snippets/oss/observability.mdx';
+:::python
+
As you build and run agents with LangChain, you need visibility into how they behave: which [tools](/oss/langchain/tools) they call, what prompts they generate, and how they make decisions. LangChain agents built with @[`create_agent`] automatically support tracing through [LangSmith](/langsmith/home), a platform for capturing, debugging, evaluating, and monitoring LLM application behavior.
+:::
+:::js
+
+As you build and run agents with LangChain, you need visibility into how they behave: which [tools](/oss/langchain/tools) they call, what prompts they generate, and how they make decisions. LangChain agents built with @[`createAgent`] automatically support tracing through [LangSmith](/langsmith/home), a platform for capturing, debugging, evaluating, and monitoring LLM application behavior.
+
+:::
+
[_Traces_](/langsmith/observability-concepts#traces) record every step of your agent's execution, from the initial user input to the final response, including all tool calls, model interactions, and decision points. This execution data helps you debug issues, evaluate performance across different inputs, and monitor usage patterns in production.
This guide shows you how to enable tracing for your LangChain agents and use LangSmith to analyze their execution.
diff --git a/src/oss/langchain/rag.mdx b/src/oss/langchain/rag.mdx
index f9813cf55b..fd10c9d1df 100644
--- a/src/oss/langchain/rag.mdx
+++ b/src/oss/langchain/rag.mdx
@@ -897,8 +897,17 @@ const agent = createAgent({
## Next steps
+:::python
+
Now that we've implemented a simple RAG application via @[`create_agent`], we can easily incorporate new features and go deeper:
+:::
+:::js
+
+Now that we've implemented a simple RAG application via @[`createAgent`], we can easily incorporate new features and go deeper:
+
+:::
+
- [Stream](/oss/langchain/streaming) tokens and other information for responsive user experiences
- Add [conversational memory](/oss/langchain/short-term-memory) to support multi-turn interactions
- Add [long-term memory](/oss/langchain/long-term-memory) to support memory across conversational threads
diff --git a/src/oss/langgraph/errors/INVALID_CHAT_HISTORY.mdx b/src/oss/langgraph/errors/INVALID_CHAT_HISTORY.mdx
index b23e2e7009..023b73aaf4 100644
--- a/src/oss/langgraph/errors/INVALID_CHAT_HISTORY.mdx
+++ b/src/oss/langgraph/errors/INVALID_CHAT_HISTORY.mdx
@@ -3,11 +3,11 @@ title: INVALID_CHAT_HISTORY
---
:::python
-This error is raised in the prebuilt @[create_agent][create_agent] when the `call_model` graph node receives a malformed list of messages. Specifically, it is malformed when there are `AIMessages` with `tool_calls` (LLM requesting to call a tool) that do not have a corresponding @[`ToolMessage`] (result of a tool invocation to return to the LLM).
+This error is raised in the prebuilt @[`create_agent`] when the `call_model` graph node receives a malformed list of messages. Specifically, it is malformed when there are `AIMessages` with `tool_calls` (LLM requesting to call a tool) that do not have a corresponding @[`ToolMessage`] (result of a tool invocation to return to the LLM).
:::
:::js
-This error is raised in the prebuilt @[createAgent][create_agent] when the `callModel` graph node receives a malformed list of messages. Specifically, it is malformed when there are `AIMessage`s with `tool_calls` (LLM requesting to call a tool) that do not have a corresponding @[`ToolMessage`] (result of a tool invocation to return to the LLM).
+This error is raised in the prebuilt @[`createAgent`] when the `callModel` graph node receives a malformed list of messages. Specifically, it is malformed when there are `AIMessage`s with `tool_calls` (LLM requesting to call a tool) that do not have a corresponding @[`ToolMessage`] (result of a tool invocation to return to the LLM).
:::
There could be a few reasons you're seeing this error:
diff --git a/src/oss/langgraph/test.mdx b/src/oss/langgraph/test.mdx
index cfe3b62e68..70f29f188b 100644
--- a/src/oss/langgraph/test.mdx
+++ b/src/oss/langgraph/test.mdx
@@ -6,7 +6,14 @@ title: Test
After you've prototyped your LangGraph agent, a natural next step is to add tests. This guide covers some useful patterns you can use when writing unit tests.
+:::python
Note that this guide is LangGraph-specific and covers scenarios around graphs with custom structures - if you are just getting started, check out [this section](/oss/langchain/test/) that uses LangChain's built-in @[`create_agent`] instead.
+:::
+
+:::js
+Note that this guide is LangGraph-specific and covers scenarios around graphs with custom structures - if you are just getting started, check out [this section](/oss/langchain/test/) that uses LangChain's built-in @[`createAgent`] instead.
+:::
+
## Prerequisites
diff --git a/src/oss/python/releases/changelog.mdx b/src/oss/python/releases/changelog.mdx
new file mode 100644
index 0000000000..aea1b9ca42
--- /dev/null
+++ b/src/oss/python/releases/changelog.mdx
@@ -0,0 +1,23 @@
+---
+title: Changelog
+sidebarTitle: Changelog
+---
+
+A chronological log of updates and improvements to LangChain Python.
+
+## v1.1 (11/25/2025)
+
+* [Model profiles](/oss/langchain/models#model-profiles): Chat models now expose supported features and capabilities through a `.profile` attribute. These data are derived from [models.dev](https://github.com/sst/models.dev), an open source project providing model capability data.
+* [Summarization middleware](/oss/langchain/middleware/built-in#summarization): Updated to support flexible trigger points using model profiles for context-aware summarization.
+* [Structured output](/oss/langchain/structured-output): `ProviderStrategy` support (native structured output) can now be inferred from model profiles.
+* [`SystemMessage` for `create_agent`](/oss/langchain/middleware/custom#working-with-system-messages): Support for passing `SystemMessage` instances directly to `create_agent`'s `system_prompt` parameter, enabling advanced features like cache control and structured content blocks.
+* [Model retry middleware](/oss/langchain/middleware/built-in#model-retry): New middleware for automatically retrying failed model calls with configurable exponential backoff.
+* [Content moderation middleware](/oss/langchain/middleware/built-in#content-moderation): OpenAI content moderation middleware for detecting and handling unsafe content in agent interactions. Supports checking user input, model output, and tool results.
+
+## Resources
+
+- [V1 Migration guide](/oss/migrate/langchain-v1) - How to migrate to LangChain v1
+- [V1 Release notes](/oss/releases/langchain-v1) - Detailed release notes
+- [Release policy](/oss/release-policy) - Detailed release policies
+- [Versioning](/oss/versioning) - Understanding version numbers
+- [Report issues on GitHub](https://github.com/langchain-ai/langchain/issues)
diff --git a/src/oss/python/releases/langchain-v1.mdx b/src/oss/python/releases/langchain-v1.mdx
index c6b387bc83..904b792976 100644
--- a/src/oss/python/releases/langchain-v1.mdx
+++ b/src/oss/python/releases/langchain-v1.mdx
@@ -1,6 +1,6 @@
---
title: What's new in v1
-sidebarTitle: Release notes
+sidebarTitle: V1 Release Notes
---
**LangChain v1 is a focused, production-ready foundation for building agents.** We've streamlined the framework around three core improvements:
@@ -381,7 +381,7 @@ Please report any issues discovered with 1.0 on [GitHub](https://github.com/lang
Read the announcement
-
+
Deep dive into middleware