diff --git a/.claude/commands/generate-guide.md b/.claude/commands/generate-guide.md
index 3eddcb25ac..862c9101fe 100644
--- a/.claude/commands/generate-guide.md
+++ b/.claude/commands/generate-guide.md
@@ -49,16 +49,16 @@ Identify agent vs client roles from the discovered source files:
## 3. Read reference files
- Find and read an existing guide to use as a structural template. Try in order:
- 1. Same pattern, different provider: glob `src/pages/docs/guides/ai-transport/*-{pattern}.mdx`
- 2. Same provider, any pattern: glob `src/pages/docs/guides/ai-transport/{provider-slug}-*.mdx`
- 3. Any existing guide: glob `src/pages/docs/guides/ai-transport/*.mdx` and pick one
+ 1. Same pattern, different provider: glob `src/pages/docs/ai-transport/guides/*/*-{pattern}.mdx`
+ 2. Same provider, any pattern: glob `src/pages/docs/ai-transport/guides/{provider-slug}/*.mdx`
+ 3. Any existing guide: glob `src/pages/docs/ai-transport/guides/*/*.mdx` and pick one
- Read `writing-style-guide.md` for tone rules.
- Read `src/data/nav/aitransport.ts` for the current nav structure.
- Read `src/data/languages/languageData.ts` to confirm supported languages for aiTransport.
## 4. Generate the guide
-Write to `src/pages/docs/guides/ai-transport/{provider-slug}-{pattern}.mdx`.
+Write to `src/pages/docs/ai-transport/guides/{provider-slug}/{provider-slug}-{pattern}.mdx`.
If the file already exists, warn and ask before overwriting.
@@ -155,7 +155,7 @@ Edit `src/data/nav/aitransport.ts`. The Guides section uses nested provider grou
{
name: '{Provider display name}',
pages: [
- { name: '{Pattern display name}', link: '/docs/guides/ai-transport/{provider-slug}-{pattern}' },
+ { name: '{Pattern display name}', link: '/docs/ai-transport/guides/{provider-slug}/{provider-slug}-{pattern}' },
],
},
```
diff --git a/.gitignore b/.gitignore
index 4945c8388a..3d5158aed7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@ node_modules/
.cache/
public
!examples/*/*/public
+.env
.env.*
!.env.example
graphql-types.ts
diff --git a/src/data/nav/aitransport.ts b/src/data/nav/aitransport.ts
index bfbf1e0b27..2f52c27ab7 100644
--- a/src/data/nav/aitransport.ts
+++ b/src/data/nav/aitransport.ts
@@ -98,19 +98,19 @@ export default {
pages: [
{
name: 'Message per response',
- link: '/docs/guides/ai-transport/anthropic/anthropic-message-per-response',
+ link: '/docs/ai-transport/guides/anthropic/anthropic-message-per-response',
},
{
name: 'Message per token',
- link: '/docs/guides/ai-transport/anthropic/anthropic-message-per-token',
+ link: '/docs/ai-transport/guides/anthropic/anthropic-message-per-token',
},
{
name: 'Human-in-the-loop',
- link: '/docs/guides/ai-transport/anthropic/anthropic-human-in-the-loop',
+ link: '/docs/ai-transport/guides/anthropic/anthropic-human-in-the-loop',
},
{
name: 'Citations',
- link: '/docs/guides/ai-transport/anthropic/anthropic-citations',
+ link: '/docs/ai-transport/guides/anthropic/anthropic-citations',
},
],
},
@@ -119,19 +119,19 @@ export default {
pages: [
{
name: 'Message per response',
- link: '/docs/guides/ai-transport/openai/openai-message-per-response',
+ link: '/docs/ai-transport/guides/openai/openai-message-per-response',
},
{
name: 'Message per token',
- link: '/docs/guides/ai-transport/openai/openai-message-per-token',
+ link: '/docs/ai-transport/guides/openai/openai-message-per-token',
},
{
name: 'Human-in-the-loop',
- link: '/docs/guides/ai-transport/openai/openai-human-in-the-loop',
+ link: '/docs/ai-transport/guides/openai/openai-human-in-the-loop',
},
{
name: 'Citations',
- link: '/docs/guides/ai-transport/openai/openai-citations',
+ link: '/docs/ai-transport/guides/openai/openai-citations',
},
],
},
@@ -140,11 +140,11 @@ export default {
pages: [
{
name: 'Message per response',
- link: '/docs/guides/ai-transport/langgraph/lang-graph-message-per-response',
+ link: '/docs/ai-transport/guides/langgraph/lang-graph-message-per-response',
},
{
name: 'Message per token',
- link: '/docs/guides/ai-transport/langgraph/lang-graph-message-per-token',
+ link: '/docs/ai-transport/guides/langgraph/lang-graph-message-per-token',
},
],
},
@@ -153,11 +153,11 @@ export default {
pages: [
{
name: 'Message per response',
- link: '/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-response',
+ link: '/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-response',
},
{
name: 'Message per token',
- link: '/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-token',
+ link: '/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-token',
},
],
},
diff --git a/src/data/nav/chat.ts b/src/data/nav/chat.ts
index 68f13dd2d8..af612eee13 100644
--- a/src/data/nav/chat.ts
+++ b/src/data/nav/chat.ts
@@ -228,11 +228,11 @@ export default {
pages: [
{
name: 'Livestream chat',
- link: '/docs/guides/chat/build-livestream',
+ link: '/docs/chat/guides/build-livestream',
},
{
name: 'Handling discontinuity',
- link: '/docs/guides/chat/handling-discontinuity',
+ link: '/docs/chat/guides/handling-discontinuity',
},
],
},
diff --git a/src/data/nav/pubsub.ts b/src/data/nav/pubsub.ts
index 30454c9ded..6936a8f9ab 100644
--- a/src/data/nav/pubsub.ts
+++ b/src/data/nav/pubsub.ts
@@ -361,15 +361,15 @@ export default {
pages: [
{
name: 'Data streaming',
- link: '/docs/guides/pub-sub/data-streaming',
+ link: '/docs/pub-sub/guides/data-streaming',
},
{
name: 'Dashboards and visualizations',
- link: '/docs/guides/pub-sub/dashboards-and-visualizations',
+ link: '/docs/pub-sub/guides/dashboards-and-visualizations',
},
{
name: 'Handling discontinuity',
- link: '/docs/guides/pub-sub/handling-discontinuity',
+ link: '/docs/pub-sub/guides/handling-discontinuity',
},
],
},
diff --git a/src/pages/docs/guides/ai-transport/anthropic/anthropic-citations.mdx b/src/pages/docs/ai-transport/guides/anthropic/anthropic-citations.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/anthropic/anthropic-citations.mdx
rename to src/pages/docs/ai-transport/guides/anthropic/anthropic-citations.mdx
index f9489d161f..1533e4b683 100644
--- a/src/pages/docs/guides/ai-transport/anthropic/anthropic-citations.mdx
+++ b/src/pages/docs/ai-transport/guides/anthropic/anthropic-citations.mdx
@@ -2,6 +2,8 @@
title: "Guide: Attach citations to Anthropic responses using message annotations"
meta_description: "Attach source citations to AI responses from the Anthropic Messages API using Ably message annotations."
meta_keywords: "AI, citations, Anthropic, Claude, Messages API, AI transport, Ably, realtime, message annotations, source attribution"
+redirect_from:
+ - /docs/guides/ai-transport/anthropic/anthropic-citations
---
This guide shows you how to attach source citations to AI responses from Anthropic's [Messages API](https://docs.anthropic.com/en/api/messages) using Ably [message annotations](/docs/messages/annotations). When Anthropic provides citations from documents or search results, you can publish them as annotations on Ably messages, enabling clients to display source references alongside AI responses in realtime.
diff --git a/src/pages/docs/guides/ai-transport/anthropic/anthropic-human-in-the-loop.mdx b/src/pages/docs/ai-transport/guides/anthropic/anthropic-human-in-the-loop.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/anthropic/anthropic-human-in-the-loop.mdx
rename to src/pages/docs/ai-transport/guides/anthropic/anthropic-human-in-the-loop.mdx
index bb6bc11a71..c14bc31a42 100644
--- a/src/pages/docs/guides/ai-transport/anthropic/anthropic-human-in-the-loop.mdx
+++ b/src/pages/docs/ai-transport/guides/anthropic/anthropic-human-in-the-loop.mdx
@@ -2,6 +2,8 @@
title: "Guide: Human-in-the-loop approval with Anthropic"
meta_description: "Implement human approval workflows for AI agent tool calls using Anthropic and Ably with role-based access control."
meta_keywords: "AI, human in the loop, HITL, Anthropic, Claude, tool use, approval workflow, AI transport, Ably, realtime, RBAC"
+redirect_from:
+ - /docs/guides/ai-transport/anthropic/anthropic-human-in-the-loop
---
This guide shows you how to implement a human-in-the-loop (HITL) approval workflow for AI agent tool calls using Anthropic and Ably. The agent requests human approval before executing sensitive operations, with role-based access control to verify approvers have sufficient permissions.
diff --git a/src/pages/docs/guides/ai-transport/anthropic/anthropic-message-per-response.mdx b/src/pages/docs/ai-transport/guides/anthropic/anthropic-message-per-response.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/anthropic/anthropic-message-per-response.mdx
rename to src/pages/docs/ai-transport/guides/anthropic/anthropic-message-per-response.mdx
index b73632f935..3005598a3b 100644
--- a/src/pages/docs/guides/ai-transport/anthropic/anthropic-message-per-response.mdx
+++ b/src/pages/docs/ai-transport/guides/anthropic/anthropic-message-per-response.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from the Anthropic Messages API over Ably in re
meta_keywords: "AI, token streaming, Anthropic, Claude, Messages API, AI transport, Ably, realtime, message appends"
redirect_from:
- /docs/guides/ai-transport/anthropic-message-per-response
+ - /docs/guides/ai-transport/anthropic/anthropic-message-per-response
---
This guide shows you how to stream AI responses from Anthropic's [Messages API](https://docs.anthropic.com/en/api/messages) over Ably using the [message-per-response pattern](/docs/ai-transport/token-streaming/message-per-response). Specifically, it appends each response token to a single Ably message, creating a complete AI response that grows incrementally while delivering tokens in realtime.
diff --git a/src/pages/docs/guides/ai-transport/anthropic/anthropic-message-per-token.mdx b/src/pages/docs/ai-transport/guides/anthropic/anthropic-message-per-token.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/anthropic/anthropic-message-per-token.mdx
rename to src/pages/docs/ai-transport/guides/anthropic/anthropic-message-per-token.mdx
index d9af3500c1..56fa661b62 100644
--- a/src/pages/docs/guides/ai-transport/anthropic/anthropic-message-per-token.mdx
+++ b/src/pages/docs/ai-transport/guides/anthropic/anthropic-message-per-token.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from the Anthropic Messages API over Ably in re
meta_keywords: "AI, token streaming, Anthropic, Claude, Messages API, AI transport, Ably, realtime"
redirect_from:
- /docs/guides/ai-transport/anthropic-message-per-token
+ - /docs/guides/ai-transport/anthropic/anthropic-message-per-token
---
This guide shows you how to stream AI responses from Anthropic's [Messages API](https://docs.anthropic.com/en/api/messages) over Ably using the [message-per-token pattern](/docs/ai-transport/token-streaming/message-per-token). Specifically, it implements the [explicit start/stop events approach](/docs/ai-transport/token-streaming/message-per-token#explicit-events), which publishes each response token as an individual message, along with explicit lifecycle events to signal when responses begin and end.
diff --git a/src/pages/docs/guides/ai-transport/langgraph/lang-graph-message-per-response.mdx b/src/pages/docs/ai-transport/guides/langgraph/lang-graph-message-per-response.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/langgraph/lang-graph-message-per-response.mdx
rename to src/pages/docs/ai-transport/guides/langgraph/lang-graph-message-per-response.mdx
index a448211534..3fb7968ce4 100644
--- a/src/pages/docs/guides/ai-transport/langgraph/lang-graph-message-per-response.mdx
+++ b/src/pages/docs/ai-transport/guides/langgraph/lang-graph-message-per-response.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from LangGraph over Ably in realtime using mess
meta_keywords: "AI, token streaming, LangGraph, LangChain, Anthropic, AI transport, Ably, realtime, message appends"
redirect_from:
- /docs/guides/ai-transport/lang-graph-message-per-response
+ - /docs/guides/ai-transport/langgraph/lang-graph-message-per-response
---
This guide shows you how to stream AI responses from [LangGraph](https://docs.langchain.com/oss/javascript/langgraph/overview) over Ably using the [message-per-response pattern](/docs/ai-transport/token-streaming/message-per-response). Specifically, it appends each response token to a single Ably message, creating a complete AI response that grows incrementally while delivering tokens in realtime.
diff --git a/src/pages/docs/guides/ai-transport/langgraph/lang-graph-message-per-token.mdx b/src/pages/docs/ai-transport/guides/langgraph/lang-graph-message-per-token.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/langgraph/lang-graph-message-per-token.mdx
rename to src/pages/docs/ai-transport/guides/langgraph/lang-graph-message-per-token.mdx
index 319e41b3ca..164d864505 100644
--- a/src/pages/docs/guides/ai-transport/langgraph/lang-graph-message-per-token.mdx
+++ b/src/pages/docs/ai-transport/guides/langgraph/lang-graph-message-per-token.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from LangGraph over Ably in realtime."
meta_keywords: "AI, token streaming, LangGraph, LangChain, Anthropic, AI transport, Ably, realtime"
redirect_from:
- /docs/guides/ai-transport/lang-graph-message-per-token
+ - /docs/guides/ai-transport/langgraph/lang-graph-message-per-token
---
This guide shows you how to stream AI responses from [LangGraph](https://docs.langchain.com/oss/javascript/langgraph/overview) over Ably using the [message-per-token pattern](/docs/ai-transport/token-streaming/message-per-token). Specifically, it implements the [explicit start/stop events approach](/docs/ai-transport/token-streaming/message-per-token#explicit-events), which publishes each response token as an individual message, along with explicit lifecycle events to signal when responses begin and end.
diff --git a/src/pages/docs/guides/ai-transport/openai/openai-citations.mdx b/src/pages/docs/ai-transport/guides/openai/openai-citations.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/openai/openai-citations.mdx
rename to src/pages/docs/ai-transport/guides/openai/openai-citations.mdx
index 817f6bebf9..9779f99e8b 100644
--- a/src/pages/docs/guides/ai-transport/openai/openai-citations.mdx
+++ b/src/pages/docs/ai-transport/guides/openai/openai-citations.mdx
@@ -2,6 +2,8 @@
title: "Guide: Attach citations to OpenAI responses using message annotations"
meta_description: "Attach source citations to AI responses from the OpenAI Responses API using Ably message annotations."
meta_keywords: "AI, citations, OpenAI, Responses API, AI transport, Ably, realtime, message annotations, source attribution, web search"
+redirect_from:
+ - /docs/guides/ai-transport/openai/openai-citations
---
This guide shows you how to attach source citations to AI responses from OpenAI's [Responses API](https://platform.openai.com/docs/api-reference/responses) using Ably [message annotations](/docs/messages/annotations). When OpenAI provides citations from web search results, you can publish them as annotations on Ably messages, enabling clients to display source references alongside AI responses in realtime.
diff --git a/src/pages/docs/guides/ai-transport/openai/openai-human-in-the-loop.mdx b/src/pages/docs/ai-transport/guides/openai/openai-human-in-the-loop.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/openai/openai-human-in-the-loop.mdx
rename to src/pages/docs/ai-transport/guides/openai/openai-human-in-the-loop.mdx
index 8e0ba5235a..51a3a49084 100644
--- a/src/pages/docs/guides/ai-transport/openai/openai-human-in-the-loop.mdx
+++ b/src/pages/docs/ai-transport/guides/openai/openai-human-in-the-loop.mdx
@@ -2,6 +2,8 @@
title: "Guide: Human-in-the-loop approval with OpenAI"
meta_description: "Implement human approval workflows for AI agent tool calls using OpenAI and Ably with role-based access control."
meta_keywords: "AI, human in the loop, HITL, OpenAI, tool calls, approval workflow, AI transport, Ably, realtime, RBAC"
+redirect_from:
+ - /docs/guides/ai-transport/openai/openai-human-in-the-loop
---
This guide shows you how to implement a human-in-the-loop (HITL) approval workflow for AI agent tool calls using OpenAI and Ably. The agent requests human approval before executing sensitive operations, with role-based access control to verify approvers have sufficient permissions.
diff --git a/src/pages/docs/guides/ai-transport/openai/openai-message-per-response.mdx b/src/pages/docs/ai-transport/guides/openai/openai-message-per-response.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/openai/openai-message-per-response.mdx
rename to src/pages/docs/ai-transport/guides/openai/openai-message-per-response.mdx
index 799517c1b6..d35cdcd803 100644
--- a/src/pages/docs/guides/ai-transport/openai/openai-message-per-response.mdx
+++ b/src/pages/docs/ai-transport/guides/openai/openai-message-per-response.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from the OpenAI Responses API over Ably in real
meta_keywords: "AI, token streaming, OpenAI, Responses API, AI transport, Ably, realtime, message appends"
redirect_from:
- /docs/guides/ai-transport/openai-message-per-response
+ - /docs/guides/ai-transport/openai/openai-message-per-response
---
This guide shows you how to stream AI responses from OpenAI's [Responses API](https://platform.openai.com/docs/api-reference/responses) over Ably using the [message-per-response pattern](/docs/ai-transport/token-streaming/message-per-response). Specifically, it appends each response token to a single Ably message, creating a complete AI response that grows incrementally while delivering tokens in realtime.
diff --git a/src/pages/docs/guides/ai-transport/openai/openai-message-per-token.mdx b/src/pages/docs/ai-transport/guides/openai/openai-message-per-token.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/openai/openai-message-per-token.mdx
rename to src/pages/docs/ai-transport/guides/openai/openai-message-per-token.mdx
index e050704dce..fbd352dc5c 100644
--- a/src/pages/docs/guides/ai-transport/openai/openai-message-per-token.mdx
+++ b/src/pages/docs/ai-transport/guides/openai/openai-message-per-token.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from the OpenAI Responses API over Ably in real
meta_keywords: "AI, token streaming, OpenAI, Responses API, AI transport, Ably, realtime"
redirect_from:
- /docs/guides/ai-transport/openai-message-per-token
+ - /docs/guides/ai-transport/openai/openai-message-per-token
---
This guide shows you how to stream AI responses from OpenAI's [Responses API](https://platform.openai.com/docs/api-reference/responses) over Ably using the [message-per-token pattern](/docs/ai-transport/token-streaming/message-per-token). Specifically, it implements the [explicit start/stop events approach](/docs/ai-transport/token-streaming/message-per-token#explicit-events), which publishes each response token as an individual message, along with explicit lifecycle events to signal when responses begin and end.
diff --git a/src/pages/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-response.mdx b/src/pages/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-response.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-response.mdx
rename to src/pages/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-response.mdx
index 6276f2aa29..2a549bd7ec 100644
--- a/src/pages/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-response.mdx
+++ b/src/pages/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-response.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from the Vercel AI SDK over Ably in realtime us
meta_keywords: "AI, token streaming, Vercel, AI SDK, AI transport, Ably, realtime, message appends"
redirect_from:
- /docs/guides/ai-transport/vercel-message-per-response
+ - /docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-response
---
This guide shows you how to stream AI responses from the [Vercel AI SDK](https://ai-sdk.dev/docs/ai-sdk-core/generating-text) over Ably using the [message-per-response pattern](/docs/ai-transport/token-streaming/message-per-response). Specifically, it appends each response token to a single Ably message, creating a complete AI response that grows incrementally while delivering tokens in realtime.
diff --git a/src/pages/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-token.mdx b/src/pages/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-token.mdx
similarity index 99%
rename from src/pages/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-token.mdx
rename to src/pages/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-token.mdx
index 97354e9f49..89fc558354 100644
--- a/src/pages/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-token.mdx
+++ b/src/pages/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-token.mdx
@@ -4,6 +4,7 @@ meta_description: "Stream tokens from the Vercel AI SDK over Ably in realtime."
meta_keywords: "AI, token streaming, Vercel, AI SDK, AI transport, Ably, realtime"
redirect_from:
- /docs/guides/ai-transport/vercel-message-per-token
+ - /docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-token
---
This guide shows you how to stream AI responses from the [Vercel AI SDK](https://ai-sdk.dev/docs/ai-sdk-core/generating-text) over Ably using the [message-per-token pattern](/docs/ai-transport/token-streaming/message-per-token). Specifically, it implements the [explicit start/stop events approach](/docs/ai-transport/token-streaming/message-per-token#explicit-events), which publishes each response token as an individual message, along with explicit lifecycle events to signal when responses begin and end.
diff --git a/src/pages/docs/ai-transport/index.mdx b/src/pages/docs/ai-transport/index.mdx
index 9b4e25aa0c..6f0e17a5c1 100644
--- a/src/pages/docs/ai-transport/index.mdx
+++ b/src/pages/docs/ai-transport/index.mdx
@@ -26,19 +26,19 @@ Use the following guides to get started with OpenAI:
title: 'Message-per-response',
description: 'Stream OpenAI responses using message appends',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/openai/openai-message-per-response',
+ link: '/docs/ai-transport/guides/openai/openai-message-per-response',
},
{
title: 'Message-per-token',
description: 'Stream OpenAI responses using individual token messages',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/openai/openai-message-per-token',
+ link: '/docs/ai-transport/guides/openai/openai-message-per-token',
},
{
title: 'Human-in-the-loop',
description: 'Implement human-in-the-loop approval workflows with OpenAI',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/openai/openai-human-in-the-loop',
+ link: '/docs/ai-transport/guides/openai/openai-human-in-the-loop',
},
]}
@@ -53,19 +53,19 @@ Use the following guides to get started with Anthropic:
title: 'Message-per-response',
description: 'Stream Anthropic responses using message appends',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/anthropic/anthropic-message-per-response',
+ link: '/docs/ai-transport/guides/anthropic/anthropic-message-per-response',
},
{
title: 'Message-per-token',
description: 'Stream Anthropic responses using individual token messages',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/anthropic/anthropic-message-per-token',
+ link: '/docs/ai-transport/guides/anthropic/anthropic-message-per-token',
},
{
title: 'Human-in-the-loop',
description: 'Implement human-in-the-loop approval workflows with Anthropic',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/anthropic/anthropic-human-in-the-loop',
+ link: '/docs/ai-transport/guides/anthropic/anthropic-human-in-the-loop',
},
]}
@@ -80,13 +80,13 @@ Use the following guides to get started with the Vercel AI SDK:
title: 'Message-per-response',
description: 'Stream Vercel AI SDK responses using message appends',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-response',
+ link: '/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-response',
},
{
title: 'Message-per-token',
description: 'Stream Vercel AI SDK responses using individual token messages',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/vercel-ai-sdk/vercel-message-per-token',
+ link: '/docs/ai-transport/guides/vercel-ai-sdk/vercel-message-per-token',
},
]}
@@ -101,13 +101,13 @@ Use the following guides to get started with LangGraph:
title: 'Message-per-response',
description: 'Stream LangGraph responses using message appends',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/langgraph/lang-graph-message-per-response',
+ link: '/docs/ai-transport/guides/langgraph/lang-graph-message-per-response',
},
{
title: 'Message-per-token',
description: 'Stream LangGraph responses using individual token messages',
image: 'icon-tech-javascript',
- link: '/docs/guides/ai-transport/langgraph/lang-graph-message-per-token',
+ link: '/docs/ai-transport/guides/langgraph/lang-graph-message-per-token',
},
]}
diff --git a/src/pages/docs/ai-transport/token-streaming/index.mdx b/src/pages/docs/ai-transport/token-streaming/index.mdx
index 16482d93b4..07450b91d6 100644
--- a/src/pages/docs/ai-transport/token-streaming/index.mdx
+++ b/src/pages/docs/ai-transport/token-streaming/index.mdx
@@ -89,5 +89,5 @@ Different models and frameworks use different events to signal streaming state,
- Implement token streaming with [message-per-response](/docs/ai-transport/token-streaming/message-per-response) (recommended for most applications)
- Implement token streaming with [message-per-token](/docs/ai-transport/token-streaming/message-per-token) for sliding-window use cases
-- Explore the [guides](/docs/guides/ai-transport/openai/openai-message-per-response) for integration with specific models and frameworks
+- Explore the [guides](/docs/ai-transport/guides/openai/openai-message-per-response) for integration with specific models and frameworks
- Learn about [sessions and identity](/docs/ai-transport/sessions-identity) in AI Transport applications
diff --git a/src/pages/docs/channels/options/deltas.mdx b/src/pages/docs/channels/options/deltas.mdx
index e5e47378df..75bfb30734 100644
--- a/src/pages/docs/channels/options/deltas.mdx
+++ b/src/pages/docs/channels/options/deltas.mdx
@@ -235,6 +235,6 @@ In these cases, the service indicates the discontinuity to the client, together
## Related optimization techniques
-- [Conflation](/docs/guides/pub-sub/data-streaming#conflation) delivers only the latest message per key in a time window. Use when intermediate values can be discarded.
-- [Server-side batching](/docs/guides/pub-sub/data-streaming#server-side-batching) groups messages into batches. Use when every message matters but delivery can be slightly delayed.
-- For a comparison of all optimization techniques, see the [Data Streaming guide](/docs/guides/pub-sub/data-streaming).
+- [Conflation](/docs/pub-sub/guides/data-streaming#conflation) delivers only the latest message per key in a time window. Use when intermediate values can be discarded.
+- [Server-side batching](/docs/pub-sub/guides/data-streaming#server-side-batching) groups messages into batches. Use when every message matters but delivery can be slightly delayed.
+- For a comparison of all optimization techniques, see the [Data Streaming guide](/docs/pub-sub/guides/data-streaming).
diff --git a/src/pages/docs/channels/options/index.mdx b/src/pages/docs/channels/options/index.mdx
index 70088ffd9f..a6e2f99999 100644
--- a/src/pages/docs/channels/options/index.mdx
+++ b/src/pages/docs/channels/options/index.mdx
@@ -10,7 +10,7 @@ redirect_from:
Channel options can be used to customize the functionality of channels. This includes enabling features such as [encryption](/docs/channels/options/encryption) and [deltas](/docs/channels/options/deltas), or for a client to retrieve messages published prior to it attaching to a channel using [rewind](/docs/channels/options/rewind).
Channel options are set under the following properties:
diff --git a/src/pages/docs/guides/chat/build-livestream.mdx b/src/pages/docs/chat/guides/build-livestream.mdx
similarity index 99%
rename from src/pages/docs/guides/chat/build-livestream.mdx
rename to src/pages/docs/chat/guides/build-livestream.mdx
index 497795d7ac..cb0b40edab 100644
--- a/src/pages/docs/guides/chat/build-livestream.mdx
+++ b/src/pages/docs/chat/guides/build-livestream.mdx
@@ -2,6 +2,8 @@
title: "Guide: Building livestream chat at scale with Ably"
meta_description: "Architecting livestream chat with Ably: performance, reliability, and cost at scale. Key decisions, technical depth, and why Ably is the right choice."
meta_keywords: "livestream, chat, scalability, Ably Chat, chat SDK, realtime messaging, dependability, cost optimisation"
+redirect_from:
+ - /docs/guides/chat/build-livestream
---
Ably Chat is purpose-built for livestream chat at scale. If you need to deliver a seamless, high-throughput chat experience to thousands or millions of users, without sacrificing performance, reliability, or costs then Ably is the proven choice.
diff --git a/src/pages/docs/guides/chat/handling-discontinuity.mdx b/src/pages/docs/chat/guides/handling-discontinuity.mdx
similarity index 97%
rename from src/pages/docs/guides/chat/handling-discontinuity.mdx
rename to src/pages/docs/chat/guides/handling-discontinuity.mdx
index 1a7f602b39..654ba94a51 100644
--- a/src/pages/docs/guides/chat/handling-discontinuity.mdx
+++ b/src/pages/docs/chat/guides/handling-discontinuity.mdx
@@ -2,12 +2,14 @@
title: "Guide: Handle discontinuity in Chat"
meta_description: "Detect and recover from message discontinuity in Ably Chat applications. Learn to use the onDiscontinuity handler and historyBeforeSubscribe to recover missed messages."
meta_keywords: "discontinuity, message continuity, onDiscontinuity, Chat, message recovery, missed messages, historyBeforeSubscribe"
+redirect_from:
+ - /docs/guides/chat/handling-discontinuity
---
When a client experiences a period of disconnection longer than the two-minute recovery window, or when Ably signals a loss of message continuity, your application may have missed messages. This is called a *discontinuity*. This guide explains how to detect and recover from discontinuities in Chat applications.
## What causes discontinuity
diff --git a/src/pages/docs/chat/rooms/messages.mdx b/src/pages/docs/chat/rooms/messages.mdx
index 1cc563e40e..dd54baf9b3 100644
--- a/src/pages/docs/chat/rooms/messages.mdx
+++ b/src/pages/docs/chat/rooms/messages.mdx
@@ -223,7 +223,7 @@ off()
## Send a message
diff --git a/src/pages/docs/guides/pub-sub/dashboards-and-visualizations.mdx b/src/pages/docs/pub-sub/guides/dashboards-and-visualizations.mdx
similarity index 99%
rename from src/pages/docs/guides/pub-sub/dashboards-and-visualizations.mdx
rename to src/pages/docs/pub-sub/guides/dashboards-and-visualizations.mdx
index 9edbdcac9f..57a08296de 100644
--- a/src/pages/docs/guides/pub-sub/dashboards-and-visualizations.mdx
+++ b/src/pages/docs/pub-sub/guides/dashboards-and-visualizations.mdx
@@ -2,6 +2,8 @@
title: "Guide: Building realtime dashboards with Ably"
meta_description: "Architecting realtime dashboards with Ably: from fan engagement at scale to critical monitoring. Key decisions, technical depth, and why Ably is the right choice."
meta_keywords: "realtime dashboard, pub/sub, fan engagement, patient monitoring, IoT dashboards, data streaming, scalability, cost optimization"
+redirect_from:
+ - /docs/guides/pub-sub/dashboards-and-visualizations
---
Ably Pub/Sub is purpose-built for delivering realtime data at any scale. Whether you're delivering live sports statistics to millions of fans, streaming critical patient vitals to a nurse's station, or updating stock prices across thousands of trading terminals, Ably handles the infrastructure so you can focus on your application.
diff --git a/src/pages/docs/guides/pub-sub/data-streaming.mdx b/src/pages/docs/pub-sub/guides/data-streaming.mdx
similarity index 99%
rename from src/pages/docs/guides/pub-sub/data-streaming.mdx
rename to src/pages/docs/pub-sub/guides/data-streaming.mdx
index e796bc5c4c..0da1c26039 100644
--- a/src/pages/docs/guides/pub-sub/data-streaming.mdx
+++ b/src/pages/docs/pub-sub/guides/data-streaming.mdx
@@ -2,6 +2,8 @@
title: "Guide: Data streaming and distribution with Ably"
meta_description: "Optimize data streaming at scale with Ably: reduce bandwidth with Deltas, manage bursts with server-side batching, ensure freshness with Conflation."
meta_keywords: "data streaming, pub/sub, deltas, conflation, server-side batching, bandwidth optimization, message distribution, scalability, cost optimization"
+redirect_from:
+ - /docs/guides/pub-sub/data-streaming
---
Ably is purpose-built for realtime high-throughput data streaming at scale. Whether you're distributing telemetry data, financial updates, or social media feeds, Ably handles the complexity of message distribution so you can focus on your application.
diff --git a/src/pages/docs/guides/pub-sub/handling-discontinuity.mdx b/src/pages/docs/pub-sub/guides/handling-discontinuity.mdx
similarity index 98%
rename from src/pages/docs/guides/pub-sub/handling-discontinuity.mdx
rename to src/pages/docs/pub-sub/guides/handling-discontinuity.mdx
index 3022443082..caa54384de 100644
--- a/src/pages/docs/guides/pub-sub/handling-discontinuity.mdx
+++ b/src/pages/docs/pub-sub/guides/handling-discontinuity.mdx
@@ -4,12 +4,13 @@ meta_description: "Detect and recover from message discontinuity in Ably Pub/Sub
meta_keywords: "discontinuity, message continuity, resumed flag, message recovery, missed messages, reconnection, history, untilAttach"
redirect_from:
- /docs/guides/handling-discontinuity
+ - /docs/guides/pub-sub/handling-discontinuity
---
When a client experiences a period of disconnection longer than the two-minute recovery window, or when Ably signals a loss of message continuity, your application may have missed messages. This is called a *discontinuity*. This guide explains how to detect and recover from discontinuities in Pub/Sub applications.
## What causes discontinuity