From f59d82450d4f14116fae5767549028ecd6e0361d Mon Sep 17 00:00:00 2001 From: Greg Holmes Date: Tue, 2 Dec 2025 09:55:52 +0000 Subject: [PATCH 01/59] Add AI transport as a product within docs --- src/data/index.ts | 14 +++++++++++++- src/data/languages/languageData.ts | 3 +++ src/data/nav/aitransport.ts | 23 +++++++++++++++++++++++ src/data/nav/index.ts | 11 ++++++++++- src/data/types.ts | 2 +- src/pages/docs/ai-transport/index.mdx | 6 ++++++ 6 files changed, 56 insertions(+), 3 deletions(-) create mode 100644 src/data/nav/aitransport.ts create mode 100644 src/pages/docs/ai-transport/index.mdx diff --git a/src/data/index.ts b/src/data/index.ts index b7bd28c49a..c36331ceb1 100644 --- a/src/data/index.ts +++ b/src/data/index.ts @@ -1,4 +1,12 @@ -import { chatNavData, liveObjectsNavData, liveSyncNavData, platformNavData, pubsubNavData, spacesNavData } from './nav'; +import { + aiTransportNavData, + chatNavData, + liveObjectsNavData, + liveSyncNavData, + platformNavData, + pubsubNavData, + spacesNavData, +} from './nav'; import { languageData } from './languages'; import { PageData, ProductData } from './types'; import homepageContentData from './content/homepage'; @@ -16,6 +24,10 @@ export const productData = { nav: chatNavData, languages: languageData.chat, }, + aiTransport: { + nav: aiTransportNavData, + languages: languageData.aiTransport, + }, spaces: { nav: spacesNavData, languages: languageData.spaces, diff --git a/src/data/languages/languageData.ts b/src/data/languages/languageData.ts index f5c1120992..2ac17475ee 100644 --- a/src/data/languages/languageData.ts +++ b/src/data/languages/languageData.ts @@ -41,6 +41,9 @@ export default { swift: '1.0', kotlin: '1.0', }, + aiTransport: { + javascript: '2.11', + }, spaces: { javascript: '0.4', react: '0.4', diff --git a/src/data/nav/aitransport.ts b/src/data/nav/aitransport.ts new file mode 100644 index 0000000000..53699c56e5 --- /dev/null +++ b/src/data/nav/aitransport.ts @@ -0,0 +1,23 @@ +import { NavProduct } from './types'; + +export default { + name: 'Ably AI Transport', + link: '/docs/ai-transport', + icon: { + closed: 'icon-gui-prod-ai-transport-outline', + open: 'icon-gui-prod-ai-transport-solid', + }, + content: [ + { + name: 'Introduction', + pages: [ + { + name: 'About AI Transport', + link: '/docs/ai-transport', + index: true, + }, + ], + }, + ], + api: [], +} satisfies NavProduct; diff --git a/src/data/nav/index.ts b/src/data/nav/index.ts index aac3975f22..e5ed49fddb 100644 --- a/src/data/nav/index.ts +++ b/src/data/nav/index.ts @@ -1,8 +1,17 @@ import platformNavData from './platform'; import pubsubNavData from './pubsub'; import chatNavData from './chat'; +import aiTransportNavData from './aitransport'; import liveObjectsNavData from './liveobjects'; import spacesNavData from './spaces'; import liveSyncNavData from './livesync'; -export { platformNavData, pubsubNavData, chatNavData, liveObjectsNavData, spacesNavData, liveSyncNavData }; +export { + platformNavData, + pubsubNavData, + chatNavData, + aiTransportNavData, + liveObjectsNavData, + spacesNavData, + liveSyncNavData, +}; diff --git a/src/data/types.ts b/src/data/types.ts index 0884a04a8b..a9c2b2977b 100644 --- a/src/data/types.ts +++ b/src/data/types.ts @@ -3,7 +3,7 @@ import { LanguageData } from './languages/types'; import { NavProduct } from './nav/types'; const pageKeys = ['homepage'] as const; -const productKeys = ['platform', 'pubsub', 'chat', 'spaces', 'liveObjects', 'liveSync'] as const; +const productKeys = ['platform', 'pubsub', 'chat', 'aiTransport', 'spaces', 
'liveObjects', 'liveSync'] as const; export type ProductKey = (typeof productKeys)[number]; type PageKey = (typeof pageKeys)[number]; diff --git a/src/pages/docs/ai-transport/index.mdx b/src/pages/docs/ai-transport/index.mdx new file mode 100644 index 0000000000..fb2f2b271e --- /dev/null +++ b/src/pages/docs/ai-transport/index.mdx @@ -0,0 +1,6 @@ +--- +title: About AI Transport +meta_description: "Learn more about Ably's AI Transport and the features that enable you to quickly build functionality into new and existing applications." +redirect_from: + - /docs/products/ai-transport +--- From a6d9352e6b881d50219dbe501d883ee743f8c3c4 Mon Sep 17 00:00:00 2001 From: matt423 Date: Mon, 15 Dec 2025 10:12:58 +0000 Subject: [PATCH 02/59] chore: Add AI Transport examples filter --- src/components/Examples/ExamplesGrid.tsx | 2 ++ src/data/examples/index.ts | 3 +++ 2 files changed, 5 insertions(+) diff --git a/src/components/Examples/ExamplesGrid.tsx b/src/components/Examples/ExamplesGrid.tsx index c372f8ee84..1fde8d68dc 100644 --- a/src/components/Examples/ExamplesGrid.tsx +++ b/src/components/Examples/ExamplesGrid.tsx @@ -32,6 +32,8 @@ const ExamplesGrid = ({ return 'text-blue-600'; case 'liveObjects': return 'text-green-600'; + case 'aiTransport': + return 'text-cyan-500'; default: return 'text-orange-700'; } diff --git a/src/data/examples/index.ts b/src/data/examples/index.ts index 6e6467b355..644f4023d1 100644 --- a/src/data/examples/index.ts +++ b/src/data/examples/index.ts @@ -287,6 +287,9 @@ export const products = { spaces: { label: 'Spaces', }, + aitransport: { + label: 'AI Transport', + }, }; const useCasesList = [ From a88aba6b6ee7c9b72dff6d9e39d1b477f4f56f24 Mon Sep 17 00:00:00 2001 From: matt423 Date: Mon, 15 Dec 2025 10:13:23 +0000 Subject: [PATCH 03/59] chore: Add AI Transport product tile to the homepage Link to the pending `/ai-transport` overview page. 
--- src/data/content/homepage.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/data/content/homepage.ts b/src/data/content/homepage.ts index 71c113c8ea..f25ece224d 100644 --- a/src/data/content/homepage.ts +++ b/src/data/content/homepage.ts @@ -38,6 +38,10 @@ export default { name: 'liveSync', link: '/docs/livesync', }, + { + name: 'aiTransport', + link: '/docs/ai-transport', + }, ], }, examples: { From d1536b3cbd1d23c3be38499f59f7b74dc0855def Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 9 Dec 2025 17:21:45 +0000 Subject: [PATCH 04/59] ait/token-streaming: add message per token page --- src/data/nav/aitransport.ts | 9 +++++++++ .../features/token-streaming/message-per-token.mdx | 4 ++++ 2 files changed, 13 insertions(+) create mode 100644 src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx diff --git a/src/data/nav/aitransport.ts b/src/data/nav/aitransport.ts index 53699c56e5..a0cea2f5cc 100644 --- a/src/data/nav/aitransport.ts +++ b/src/data/nav/aitransport.ts @@ -18,6 +18,15 @@ export default { }, ], }, + { + name: 'Token streaming', + pages: [ + { + name: 'Message per token', + link: '/docs/ai-transport/features/token-streaming/message-per-token', + }, + ], + }, ], api: [], } satisfies NavProduct; diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx new file mode 100644 index 0000000000..ce3b34eba0 --- /dev/null +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx @@ -0,0 +1,4 @@ +--- +title: Message per token +meta_description: "Stream individual tokens from AI models as separate messages over Ably." +--- From f770ce70ce466fcd4155bafe32944dcf5d695f46 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 9 Dec 2025 18:11:47 +0000 Subject: [PATCH 05/59] ait/message-per-token: add intro Add intro describing the pattern, its properties, and use cases. --- .../features/token-streaming/message-per-token.mdx | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx index ce3b34eba0..1b7f6b9920 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx @@ -2,3 +2,12 @@ title: Message per token meta_description: "Stream individual tokens from AI models as separate messages over Ably." --- + +Token streaming with message-per-token is a pattern where every token generated by your model is published as its own Ably message. Each token then appears as one message in the channel history. + +This pattern is useful when clients only care about the most recent part of a response and you are happy to treat the channel history as a short sliding window rather than a full conversation log. For example: + +- **Backend-stored responses**: The backend writes complete responses to a database and clients load those full responses from there, while Ably is used only to deliver live tokens for the current in-progress response. +- **Live transcription, captioning, or translation**: A viewer who joins a live stream only needs the last few tokens for the current "frame" of subtitles, not the entire transcript so far. 
+- **Code assistance in an editor**: Streamed tokens become part of the file on disk as they are accepted, so past tokens do not need to be replayed from Ably. +- **Autocomplete**: A fresh response is streamed for each change a user makes to a document, with only the latest suggestion being relevant. From 149a322eaac22e9a9b3ca7e90d4ee1d5ef06a554 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 9 Dec 2025 18:59:37 +0000 Subject: [PATCH 06/59] ait/message-per-token: add token publishing Includes continuous token streams, correlating tokens for distinct responses, and explicit start/end events. --- .../token-streaming/message-per-token.mdx | 134 ++++++++++++++++++ 1 file changed, 134 insertions(+) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx index 1b7f6b9920..1571d0473e 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx @@ -11,3 +11,137 @@ This pattern is useful when clients only care about the most recent part of a re - **Live transcription, captioning, or translation**: A viewer who joins a live stream only needs the last few tokens for the current "frame" of subtitles, not the entire transcript so far. - **Code assistance in an editor**: Streamed tokens become part of the file on disk as they are accepted, so past tokens do not need to be replayed from Ably. - **Autocomplete**: A fresh response is streamed for each change a user makes to a document, with only the latest suggestion being relevant. + + +To get started with token streaming, all you need to do is: + +* [Use a channel](#use) +* [Publish tokens from your server](#publish) +* [Subscribe to the token stream](#subscribe) + +## Use a channel + +[Channels](/docs/channels) separate message traffic into different topics. For token streaming, each conversation or session typically has its own channel. + +Use the [`get()`](/docs/api/realtime-sdk/channels#get) method to create or retrieve a channel instance: + + +```javascript +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); +``` + + +## Publish tokens from your server + +Publishing tokens to a channel is how your AI agent communicates responses to clients. Subscribers receive tokens in realtime as they're published. 
+ + + +Initialize an Ably Realtime client on your server: + + +```javascript +import Ably from 'ably'; + +const realtime = new Ably.Realtime({ key: 'YOUR_API_KEY' }); +``` + + +### Continuous token stream + +For simple streaming scenarios such as live transcription, where all tokens are part of a continuous stream, simply publish each token as a message on the channel: + + +```javascript +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); + +// Example: stream returns events like { type: 'token', text: 'Hello' } +for await (const event of stream) { + if (event.type === 'token') { + await channel.publish('token', event.text); + } +} +``` + + +### Token stream with distinct responses + +For applications with multiple, distinct responses, such as chat conversations, include a `responseId` in message [extras](/docs/messages#properties) to correlate tokens together that belong to the same response: + + +```javascript +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); + +// Example: stream returns events like { type: 'token', text: 'Hello', responseId: 'resp_abc123' } +for await (const event of stream) { + if (event.type === 'token') { + await channel.publish({ + name: 'token', + data: event.text, + extras: { + headers: { + responseId: event.responseId + } + } + }); + } +} +``` + + +Clients use the `responseId` to group tokens belonging to the same response. + +### Token stream with explicit start/end events + +In some cases, your AI model response stream may include explicit events to mark response boundaries: + + +```javascript +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); + +// Example: stream returns events like: +// { type: 'start', responseId: 'resp_abc123', metadata: { model: 'llama-3' } } +// { type: 'token', responseId: 'resp_abc123', text: 'Hello' } +// { type: 'end', responseId: 'resp_abc123' } + +for await (const event of stream) { + if (event.type === 'start') { + // Publish response start + await channel.publish({ + name: 'response.start', + extras: { + headers: { + responseId: event.responseId, + model: event.metadata?.model + } + } + }); + } else if (event.type === 'token') { + // Publish tokens + await channel.publish({ + name: 'token', + data: event.text, + extras: { + headers: { + responseId: event.responseId + } + } + }); + } else if (event.type === 'end') { + // Publish response complete + await channel.publish({ + name: 'response.complete', + extras: { + headers: { + responseId: event.responseId + } + } + }); + } +} +``` + + +This pattern provides explicit boundaries, making it easier for clients to manage response state. From e0c010e47eda56d93865bcd4cc65dd7d24ca35aa Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 9 Dec 2025 21:34:29 +0000 Subject: [PATCH 07/59] ait/message-per-token: token streaming patterns Splits each token streaming approach into distinct patterns and shows both the publish and subscribe side behaviour alongside one another. 
--- .../token-streaming/message-per-token.mdx | 171 +++++++++++++----- 1 file changed, 128 insertions(+), 43 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx index 1571d0473e..f0e7d12442 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx @@ -3,7 +3,7 @@ title: Message per token meta_description: "Stream individual tokens from AI models as separate messages over Ably." --- -Token streaming with message-per-token is a pattern where every token generated by your model is published as its own Ably message. Each token then appears as one message in the channel history. +Token streaming with message-per-token is a pattern where every token generated by your model is published as its own Ably message. Each token then appears as one message in the channel history. This uses [Ably Pub/Sub](/docs/basics) for realtime communication between agents and clients. This pattern is useful when clients only care about the most recent part of a response and you are happy to treat the channel history as a short sliding window rather than a full conversation log. For example: @@ -12,14 +12,9 @@ This pattern is useful when clients only care about the most recent part of a re - **Code assistance in an editor**: Streamed tokens become part of the file on disk as they are accepted, so past tokens do not need to be replayed from Ably. - **Autocomplete**: A fresh response is streamed for each change a user makes to a document, with only the latest suggestion being relevant. +## Publishing tokens -To get started with token streaming, all you need to do is: - -* [Use a channel](#use) -* [Publish tokens from your server](#publish) -* [Subscribe to the token stream](#subscribe) - -## Use a channel +Publish tokens from a [Realtime](/docs/api/realtime-sdk) client, which maintains a persistent connection to the Ably service. This allows you to publish at very high message rates with the lowest possible latencies, while preserving guarantees around message delivery order. For more information, see [Realtime and REST](/docs/basics#realtime-and-rest). [Channels](/docs/channels) separate message traffic into different topics. For token streaming, each conversation or session typically has its own channel. @@ -31,27 +26,37 @@ const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); ``` -## Publish tokens from your server - -Publishing tokens to a channel is how your AI agent communicates responses to clients. Subscribers receive tokens in realtime as they're published. - - - -Initialize an Ably Realtime client on your server: +When publishing tokens, don't await the `channel.publish()` call. Ably rolls up acknowledgments and debounces them for efficiency, which means awaiting each publish would unnecessarily slow down your token stream. Messages are still published in the order that `publish()` is called, so delivery order is not affected. 
```javascript -import Ably from 'ably'; +// ✅ Do this - publish without await for maximum throughput +for await (const event of stream) { + if (event.type === 'token') { + channel.publish('token', event.text); + } +} -const realtime = new Ably.Realtime({ key: 'YOUR_API_KEY' }); +// ❌ Don't do this - awaiting each publish reduces throughput +for await (const event of stream) { + if (event.type === 'token') { + await channel.publish('token', event.text); + } +} ``` -### Continuous token stream +This approach maximizes throughput while maintaining ordering guarantees, allowing you to stream tokens as fast as your AI model generates them. -For simple streaming scenarios such as live transcription, where all tokens are part of a continuous stream, simply publish each token as a message on the channel: +## Streaming patterns + +Ably is a pub/sub messaging platform, so you can structure your messages however works best for your application. Below are common patterns for streaming tokens, each showing both agent-side publishing and client-side subscription. Choose the approach that fits your use case, or create your own variation. + +### Continuous token stream + +For simple streaming scenarios such as live transcription, where all tokens are part of a continuous stream, simply publish each token as a message. + +#### Publish tokens ```javascript @@ -60,15 +65,33 @@ const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); // Example: stream returns events like { type: 'token', text: 'Hello' } for await (const event of stream) { if (event.type === 'token') { - await channel.publish('token', event.text); + channel.publish('token', event.text); } } ``` -### Token stream with distinct responses +#### Subscribe to tokens + + +```javascript +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); + +// Subscribe to token messages +await channel.subscribe('token', (message) => { + const token = message.data; + console.log(token); // log each token as it arrives +}); +``` + + +This pattern is simple and works well when you're displaying a single, continuous stream of tokens. -For applications with multiple, distinct responses, such as chat conversations, include a `responseId` in message [extras](/docs/messages#properties) to correlate tokens together that belong to the same response: +### Token stream with multiple responses + +For applications with multiple responses, such as chat conversations, include a `responseId` in message [extras](/docs/messages#properties) to correlate tokens together that belong to the same response. + +#### Publish tokens ```javascript @@ -77,7 +100,7 @@ const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); // Example: stream returns events like { type: 'token', text: 'Hello', responseId: 'resp_abc123' } for await (const event of stream) { if (event.type === 'token') { - await channel.publish({ + channel.publish({ name: 'token', data: event.text, extras: { @@ -91,36 +114,66 @@ for await (const event of stream) { ``` -Clients use the `responseId` to group tokens belonging to the same response. +#### Subscribe to tokens + +Use the `responseId` header in message extras to correlate tokens. The `responseId` allows you to group tokens belonging to the same response and correctly handle token delivery for multiple responses, even when delivered concurrently. 
+ + +```javascript +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); + +// Track responses by ID +const responses = new Map(); + +await channel.subscribe('token', (message) => { + const token = message.data; + const responseId = message.extras?.headers?.responseId; + + if (!responseId) { + console.warn('Token missing responseId'); + return; + } + + // Create an empty response + if (!responses.has(responseId)) { + responses.set(responseId, ''); + } + + // Append token to response + responses.set(responseId, responses.get(responseId) + token); +}); +``` + + +### Token stream with explicit start/stop events -### Token stream with explicit start/end events +In some cases, your AI model response stream may include explicit events to mark response boundaries. You can indicate the event type, such as a response start/stop event, using the Ably message name. -In some cases, your AI model response stream may include explicit events to mark response boundaries: +#### Publish tokens ```javascript const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); // Example: stream returns events like: -// { type: 'start', responseId: 'resp_abc123', metadata: { model: 'llama-3' } } -// { type: 'token', responseId: 'resp_abc123', text: 'Hello' } -// { type: 'end', responseId: 'resp_abc123' } +// { type: 'message_start', responseId: 'resp_abc123' } +// { type: 'message_delta', responseId: 'resp_abc123', text: 'Hello' } +// { type: 'message_stop', responseId: 'resp_abc123' } for await (const event of stream) { - if (event.type === 'start') { + if (event.type === 'message_start') { // Publish response start - await channel.publish({ - name: 'response.start', + channel.publish({ + name: 'start', extras: { headers: { - responseId: event.responseId, - model: event.metadata?.model + responseId: event.responseId } } }); - } else if (event.type === 'token') { + } else if (event.type === 'message_delta') { // Publish tokens - await channel.publish({ + channel.publish({ name: 'token', data: event.text, extras: { @@ -129,10 +182,10 @@ for await (const event of stream) { } } }); - } else if (event.type === 'end') { - // Publish response complete - await channel.publish({ - name: 'response.complete', + } else if (event.type === 'message_stop') { + // Publish response stop + channel.publish({ + name: 'stop', extras: { headers: { responseId: event.responseId @@ -144,4 +197,36 @@ for await (const event of stream) { ``` -This pattern provides explicit boundaries, making it easier for clients to manage response state. 
+#### Subscribe to tokens + +Handle each event type to manage response lifecycle: + + +```javascript +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); + +const responses = new Map(); + +// Handle response start +await channel.subscribe('start', (message) => { + const responseId = message.extras?.headers?.responseId; + responses.set(responseId, ''); +}); + +// Handle tokens +await channel.subscribe('token', (message) => { + const responseId = message.extras?.headers?.responseId; + const token = message.data; + + const currentText = responses.get(responseId) || ''; + responses.set(responseId, currentText + token); +}); + +// Handle response stop +await channel.subscribe('stop', (message) => { + const responseId = message.extras?.headers?.responseId; + const finalText = responses.get(responseId); + console.log('Response complete:', finalText); +}); +``` + From e4d1b1a7ed2f928c531ea0f9bb58630724397722 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Wed, 10 Dec 2025 09:51:35 +0000 Subject: [PATCH 08/59] ait/message-per-token: client hydration patterns Includes hydration with rewind and hydration with persisted history + untilAttach. Describes the pattern for handling in-progress live responses with complete responses loaded from the database. --- .../token-streaming/message-per-token.mdx | 197 ++++++++++++++++++ 1 file changed, 197 insertions(+) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx index f0e7d12442..7e0f48e794 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx @@ -230,3 +230,200 @@ await channel.subscribe('stop', (message) => { }); ``` + +## Client hydration + +When clients connect or reconnect, such as after a page refresh, they often need to catch up on tokens that were published while they were offline or before they joined. Ably provides several approaches to hydrate client state depending on your application's requirements. + + + +### Using rewind for recent history + +The simplest approach is to use Ably's [rewind](/docs/channels/options/rewind) channel option to automatically retrieve recent tokens when attaching to a channel: + + +```javascript +// Use rewind to receive recent historical messages +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}', { + params: { rewind: '2m' } // or rewind: 100 for message count +}); + +// Subscribe to receive both recent historical and live messages, +// which are delivered in order to the subscription +await channel.subscribe('token', (message) => { + const token = message.data; + + // Process tokens from both recent history and live stream + console.log('Token received:', token); +}); +``` + + +Rewind supports two formats: + +- **Time-based**: Use a time interval like `'30s'` or `'2m'` to retrieve messages from that time period +- **Count-based**: Use a number like `50` or `100` to retrieve the most recent N messages (maximum 100) + + + +By default, rewind is limited to the last 2 minutes of messages. This is usually sufficient for scenarios where clients need only recent context, such as for continuous token streaming, or when the response stream from a given model request does not exceed 2 minutes. If you need more than 2 minutes of history, see [Using history for longer persistence](#history). 
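+
+If you only need a fixed number of recent messages rather than a time window, the count-based form is a minimal variation of the example above; only the channel option changes, and the subscription code stays the same:
+
+<Code>
+```javascript
+// Count-based rewind: deliver up to the 100 most recent messages on attach
+const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}', {
+  params: { rewind: '100' }
+});
+```
+</Code>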
+ +### Using history for longer persistence + +For applications that need to retrieve tokens beyond the 2-minute rewind window, enable [persistence](/docs/storage-history/storage#all-message-persistence) on your channel. Use [channel history](/docs/storage-history/history) with the [`untilAttach` option](/docs/storage-history/history#continuous-history) to paginate back through history to obtain historical tokens, while preserving continuity with the delivery of live tokens: + + +```javascript +// Use a channel in a namespace called 'persisted', which has persistence enabled +const channel = realtime.channels.get('persisted:{{RANDOM_CHANNEL_NAME}}'); + +let response = ''; + +// Subscribe to live messages (implicitly attaches the channel) +await channel.subscribe('token', (message) => { + // Append the token to the end of the response + response += message.data; +}); + +// Fetch history up until the point of attachment +let page = await channel.history({ untilAttach: true }); + +// Paginate backwards through history +while (page) { + // Messages are newest-first, so prepend them to response + for (const message of page.items) { + response = message.data + response; + } + + // Move to next page if available + page = page.hasNext() ? await page.next() : null; +} +``` + + +### Hydrating an in-progress live response + +A common pattern is to persist complete model responses in your database while using Ably for live token delivery of the in-progress response. + +The client loads completed responses from your database, then reaches back into Ably channel history until it encounters a token for a response it's already loaded. + +You can retrieve partial history using either the [rewind](#rewind) or [history](#history) pattern. + +#### Hydrate using rewind + +Load completed responses from your database, then use rewind to catch up on any in-progress responses, skipping any tokens that belong to a response that was already loaded: + + +```javascript +// Load completed responses from database +const completedResponses = await loadResponsesFromDatabase(); + +// Use rewind to receive recent historical messages +const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}', { + params: { rewind: '2m' } +}); + +// Track in progress responses by ID +const inProgressResponses = new Map(); + +// Subscribe to receive both recent historical and live messages, +// which are delivered in order to the subscription +await channel.subscribe('token', (message) => { + const token = message.data; + const responseId = message.extras?.headers?.responseId; + + if (!responseId) { + console.warn('Token missing responseId'); + return; + } + + // Skip tokens for responses already hydrated from database + if (completedResponses.has(responseId)) { + return; + } + + // Create an empty in-progress response + if (!inProgressResponses.has(responseId)) { + inProgressResponses.set(responseId, ''); + } + + // Append tokens for new responses + inProgressResponses.set(responseId, inProgressResponses.get(responseId) + token); +}); +``` + + +#### Hydrate using history + +Load completed responses from your database, then paginate backwards through history to catch up on in-progress responses until you reach a token that belongs to a response you've already loaded: + + +```javascript +// Load completed responses from database +const completedResponses = await loadResponsesFromDatabase(); + +// Use a channel in a namespace called 'persisted', which has persistence enabled +const channel = 
realtime.channels.get('persisted:{{RANDOM_CHANNEL_NAME}}'); + +// Track in progress responses by ID +const inProgressResponses = new Map(); + +// Subscribe to live tokens (implicitly attaches) +await channel.subscribe('token', (message) => { + const token = message.data; + const responseId = message.extras?.headers?.responseId; + + if (!responseId) { + console.warn('Token missing responseId'); + return; + } + + // Skip tokens for responses already hydrated from database + if (completedResponses.has(responseId)) { + return; + } + + // Create an empty in-progress response + if (!inProgressResponses.has(responseId)) { + inProgressResponses.set(responseId, ''); + } + + // Append live tokens for in-progress responses + inProgressResponses.set(responseId, inProgressResponses.get(responseId) + token); +}); + +// Paginate backwards through history until we encounter a hydrated response +let page = await channel.history({ untilAttach: true }); + +// Paginate backwards through history +let done = false; +while (page && !done) { + // Messages are newest-first, so prepend them to response + for (const message of page.items) { + const token = message.data; + const responseId = message.extras?.headers?.responseId; + + // Stop when we reach a response already loaded from database + if (completedResponses.has(responseId)) { + done = true; + break; + } + + // Create an empty in-progress response + if (!inProgressResponses.has(responseId)) { + inProgressResponses.set(responseId, ''); + } + + // Prepend historical tokens for in-progress responses + inProgressResponses.set(responseId, token + inProgressResponses.get(responseId)); + } + + // Move to next page if available + page = page.hasNext() ? await page.next() : null; +} +``` + From da59a92984c807ddf927c3cd3fad5b2174af8d68 Mon Sep 17 00:00:00 2001 From: zak Date: Thu, 11 Dec 2025 11:52:27 +0000 Subject: [PATCH 09/59] ai-transport: add message per response doc Add doc explaining streaming tokens with appendMessage and update compaction allowing message-per-response history. --- src/data/nav/aitransport.ts | 9 + .../token-streaming/message-per-response.mdx | 495 ++++++++++++++++++ 2 files changed, 504 insertions(+) create mode 100644 src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx diff --git a/src/data/nav/aitransport.ts b/src/data/nav/aitransport.ts index a0cea2f5cc..4b892d74ac 100644 --- a/src/data/nav/aitransport.ts +++ b/src/data/nav/aitransport.ts @@ -16,6 +16,15 @@ export default { link: '/docs/ai-transport', index: true, }, + { + name: 'Token streaming', + pages: [ + { + name: 'Message per response', + link: '/docs/ai-transport/features/token-streaming/message-per-response', + }, + ], + }, ], }, { diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx new file mode 100644 index 0000000000..41a21ba555 --- /dev/null +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -0,0 +1,495 @@ +--- +title: Message per response +meta_description: "Stream individual tokens from AI models into a single message over Ably." +--- + +Stream LLM and generative AI responses efficiently by appending individual tokens to a single +message on an Ably channel. This pattern creates one complete response message in channel history +while delivering tokens in realtime. 
+ +## Overview + +The message-per-response pattern enables you to stream AI-generated content as individual tokens in +realtime, while maintaining a clean, compacted message history. Each AI response becomes a single +message that grows as tokens are appended, resulting in efficient storage and easy retrieval of +complete responses. + +### When to use this pattern + +This approach is ideal when: + +- You want each complete AI response stored as a single message in history. +- You want clients joining mid-stream to catch up efficiently without processing thousands of + individual tokens. +- Your application displays progressive AI responses that build up over time. + +### How it works + +1. **Initial message**: When an AI response begins, publish an initial message with `message.create` + action to the Ably channel with an empty or the first token as content. +2. **Token streaming**: Append subsequent tokens to the original message by publishing those tokens + with the `message.append` action. +3. **Live Delivery**: Clients subscribed to the channel receive each appended token in real-time, allowing + them to progressively render the response. +4. **Compacted history**: The channel history contains only one message per AI response, + which includes all tokens appended to it concatenated together. + +You do not need to mark the message or token stream as completed; the final message will +automatically have the full response with all tokens appended to it. + +## Setup + +Message append functionality requires the "Message annotations, updates, and deletes" [channel rule](/docs/channels#rules) enabled for your channel or [namespace](/docs/channels#namespaces). This rule automatically enables message persistence. + +To enable the channel rule: + +1. Go to the [Ably dashboard](https://www.ably.com/dashboard) and select your app. +2. Navigate to the "Configuration" > "Rules" section from the left-hand navigation bar. +3. Choose "Add new rule". +4. Enter a channel name or namespace pattern (e.g. `ai:*` for all channels starting with `ai:`). +5. Select the "Message annotations, updates, and deletes" rule from the list. +6. Click "Create channel rule". + +The examples in this guide use the `ai:` namespace prefix, which assumes you have configured the rule for `ai:*`. + +### Message size limits + +Standard Ably message [size limits](/docs/platform/pricing/limits#message) apply to the complete concatenated message. The system validates size limits before accepting append operations. If appending a token would exceed the maximum message size, the append is rejected. + +## Publishing tokens + +You should publish tokens from a [Realtime](/docs/api/realtime-sdk) client, which maintains a +persistent connection to the Ably service. This allows you to publish at very high message rates +with the lowest possible latencies, while preserving guarantees around message delivery order. +For more information, see [Realtime and REST](/docs/basics#realtime-and-rest). + +[Channels](/docs/channels) are used to separate message traffic into different topics. +For token streaming, each conversation or session typically has its own channel. + +Use the [`get()`](/docs/api/realtime-sdk/channels#get) method to create or retrieve a channel instance: + + +```javascript +const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); +``` + + +To start streaming an AI response, publish the initial message. 
Then append each subsequent token +to that message as it arrives from the AI model: + + +```javascript +// Example: stream yields string tokens like 'Hello', ' world', '!' + +// Publish initial message and capture the serial for appending tokens +const { serials: [msgSerial] } = await channel.publish('response', { data: '' }); + +for await (const token of stream) { + // Append each token as it arrives + channel.appendMessage(msgSerial, token); +} +``` + + +When publishing tokens, don't await the `channel.appendMessage()` call. Ably rolls up acknowledgments +and debounces them for efficiency, which means awaiting each append would unnecessarily slow down +your token stream. Messages are still published in the order that `appendMessage()` is called, so delivery +order is not affected. + +Append only supports concatenating data of the same type as the original message. For example, if +the initial message data is a string, all appended tokens must also be strings. If the initial +message data is binary, all appended tokens must be binary. + +This pattern allows publishing append operations for multiple concurrent model responses on the same +channel. As long as you append to the correct message serial, tokens from different responses will +not interfere with each other, and the final concatenated message for each response will contain only the tokens +from that response. + +### Complete publish example + +The following example shows how to stream an AI response, publishing the first token as the initial message and appending subsequent tokens: + + +```javascript +const realtime = new Ably.Realtime('{{API_KEY}}'); +const channel = realtime.channels.get('ai:responses'); + +async function streamAIResponse(prompt) { + // Example: stream yields string tokens like 'Hello', ' world', '!' + const stream = await getAIModelStream(prompt); + + let messageSerial; + + for await (const token of stream) { + if (!messageSerial) { + // First token: create the message and get serial + const response = await channel.publish('ai-response', token); + messageSerial = response.serials[0]; + } else { + // Subsequent tokens: append without awaiting + channel.appendMessage(messageSerial, token); + } + } +} +``` + + +## Subscribing to token streams + +Subscribers receive different message actions depending on when they join and how they're retrieving +messages. + +When subscribed to a channel, clients receive the initial message with the `message.create` action, +followed by each token as a `message.append` action in real-time. + + +```javascript +const channel = realtime.channels.get('ai:responses'); + +// Track responses by message serial +const responses = new Map(); + +await channel.subscribe((msg) => { + switch (msg.action) { + case 'message.create': + // New response started + responses.set(msg.serial, msg.data); + break; + case 'message.append': + // Append token to existing response + const current = responses.get(msg.serial) || ''; + responses.set(msg.serial, current + msg.data); + break; + case 'message.update': + // Replace entire response content + responses.set(msg.serial, msg.data); + break; + } +}); +``` + + +Each `message.append` event contains only the new token fragment in `msg.data`, not the full +concatenated response. + +Occasionally you may receive a `message.update` action, which indicates that the channel needs to stream the entire message data so far. 
For example, this can happen if the client [resumes](/docs/connect/states#resume) after a transient
+disconnection and the channel needs to resynchronize the full message state. In this case, `msg.data`
+contains the complete response up to that point. For `message.update` events, you should replace the
+entire response content.
+
+## Client hydration
+
+Clients joining a channel or recovering from disconnection can efficiently catch up using rewind or
+history. For temporary disconnections, Ably's automatic [connection recovery](/docs/connect/states#connection-state-recovery)
+ensures that clients receive all missed tokens in order.
+
+By using either rewind or history with `untilAttach`, clients can efficiently hydrate the existing
+response state without needing to process every individual token. Both rewind and history deliver
+concatenated responses as `message.update` events and seamlessly transition from historical
+responses to live `message.append` events.
+
+### Using rewind
+
+[Rewind](/docs/channels/options/rewind) attaches to a channel starting from a point in the past,
+delivering complete concatenated messages as `message.update` events.
+
+<Code>
+```javascript
+// Use rewind to receive recent historical messages
+const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}', {
+  params: { rewind: '2m' } // or rewind: '10' for message count
+});
+
+// Track responses by message serial
+const responses = new Map();
+
+await channel.subscribe((msg) => {
+  switch (msg.action) {
+    case 'message.create':
+      // New response started
+      responses.set(msg.serial, msg.data);
+      break;
+    case 'message.append':
+      // Append token to existing response
+      const current = responses.get(msg.serial) || '';
+      responses.set(msg.serial, current + msg.data);
+      break;
+    case 'message.update':
+      // Replace entire response content
+      responses.set(msg.serial, msg.data);
+      break;
+  }
+});
+```
+</Code>
+
+### Using history with untilAttach
+
+The `untilAttach` option provides [continuous history](/docs/storage-history/history#continuous-history)
+from the point of attachment backward:
+
+<Code>
+```javascript
+const channel = realtime.channels.get('ai:responses');
+
+const responses = new Map();
+
+// Subscribe to live messages (implicitly attaches the channel)
+await channel.subscribe((msg) => {
+  switch (msg.action) {
+    case 'message.create':
+      responses.set(msg.serial, msg.data);
+      break;
+    case 'message.append':
+      const current = responses.get(msg.serial) || '';
+      responses.set(msg.serial, current + msg.data);
+      break;
+    case 'message.update':
+      responses.set(msg.serial, msg.data);
+      break;
+  }
+});
+
+// Fetch history up until the point of attachment
+let page = await channel.history({ untilAttach: true });
+
+// Paginate backwards through history
+while (page) {
+  // Messages are newest-first
+  for (const message of page.items) {
+    // message.data contains the full concatenated text
+    responses.set(message.serial, message.data);
+  }
+
+  // Move to next page if available
+  page = page.hasNext() ? await page.next() : null;
+}
+```
+</Code>
+
+### Hydrating an in-progress response
+
+A common pattern is to persist completed responses in your database while using Ably for streaming
+in-progress responses. When clients reconnect, they load completed responses from your database
+first, then use Ably to catch up on any response that was still in progress.
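+
+The examples below call a `loadResponsesFromDatabase()` helper. This helper is not part of the Ably
+SDK; as a minimal sketch, assuming a hypothetical backend endpoint that returns completed responses
+as an array of `{ responseId, text }` rows, it might look like:
+
+<Code>
+```javascript
+// Hypothetical helper: fetch completed responses from your own backend.
+// Returns a Map keyed by responseId, so callers can check .has(responseId).
+async function loadResponsesFromDatabase() {
+  const res = await fetch('/api/responses/completed'); // illustrative endpoint
+  const rows = await res.json(); // e.g. [{ responseId: 'resp_abc123', text: '...' }]
+  return new Map(rows.map((row) => [row.responseId, row.text]));
+}
+```
+</Code>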
+ +#### Hydrate using rewind + +Load completed responses from your database, then use rewind to catch up on any in-progress response, skipping messages for responses already loaded: + + +```javascript +// Load completed responses from your database +const completedResponses = await loadResponsesFromDatabase(); + +const channel = realtime.channels.get('ai:responses', { + params: { rewind: '2m' } +}); + +await channel.subscribe((msg) => { + const responseId = msg.extras?.headers?.responseId; + + // Skip messages for responses already loaded from database + if (completedResponses.has(responseId)) { + return; + } + + switch (msg.action) { + case 'message.create': + displayNewResponse(msg.data, responseId); + break; + case 'message.append': + appendToResponse(msg.data, responseId); + break; + case 'message.update': + replaceResponse(msg.data, responseId); + break; + } +}); +``` + + +#### Hydrate using history + +Load completed responses from your database, then use history to catch up on any in-progress response: + + +```javascript +// Load completed responses from your database +const completedResponses = await loadResponsesFromDatabase(); + +const channel = realtime.channels.get('ai:responses'); + +// Subscribe to live messages (implicitly attaches) +await channel.subscribe((msg) => { + const responseId = msg.extras?.headers?.responseId; + + // Skip messages for responses already loaded from database + if (completedResponses.has(responseId)) { + return; + } + + switch (msg.action) { + case 'message.create': + displayNewResponse(msg.data, responseId); + break; + case 'message.append': + appendToResponse(msg.data, responseId); + break; + case 'message.update': + replaceResponse(msg.data, responseId); + break; + } +}); + +// Fetch history for any in-progress response +const historyPage = await channel.history({ untilAttach: true }); + +for (const msg of historyPage.items) { + const responseId = msg.extras?.headers?.responseId; + + // Skip responses already loaded from database + if (completedResponses.has(responseId)) { + continue; + } + + // msg.data contains the full concatenated text so far + displayFullResponse(msg.data, responseId); +} +``` + + +## Headers and metadata + +Use the `extras.headers` field to attach metadata to your messages. Headers are useful for correlating Ably messages with external systems, such as your database IDs or AI model request identifiers. + +### Header superseding behavior + +When you include headers in an append operation, they completely replace all previous headers on the message. This "last write wins" behavior means you must include all headers you want to retain with each append that specifies headers. 
+ + +```javascript +// Initial message with headers +const response = await channel.publish({ + name: 'ai-response', + data: 'Hello', + extras: { + headers: { + responseId: 'resp_123', + model: 'gpt-4' + } + } +}); + +// Append without headers - previous headers are retained +channel.appendMessage(response.serials[0], ' world'); +// Message headers: { responseId: 'resp_123', model: 'gpt-4' } + +// Append with headers - completely replaces previous headers +channel.appendMessage(response.serials[0], '!', { + extras: { + headers: { + responseId: 'resp_123', + model: 'gpt-4', + tokensUsed: '15' + } + } +}); +// Message headers: { responseId: 'resp_123', model: 'gpt-4', tokensUsed: '15' } +``` + + +A common pattern is to include static metadata in the initial message, then add completion metadata with the final append: + + +```javascript +async function streamWithMetadata(prompt) { + const stream = await getAIModelStream(prompt); + let messageSerial; + let tokenCount = 0; + + for await (const token of stream) { + tokenCount++; + if (!messageSerial) { + // First token: include static metadata + const response = await channel.publish({ + name: 'ai-response', + data: token, + extras: { + headers: { + responseId: prompt.responseId, + model: prompt.model + } + } + }); + messageSerial = response.serials[0]; + } else { + // Subsequent tokens: append without headers + channel.appendMessage(messageSerial, token); + } + } + + // Final append: include completion metadata + channel.appendMessage(messageSerial, '', { + extras: { + headers: { + responseId: prompt.responseId, + model: prompt.model, + tokensUsed: String(tokenCount), + completedAt: new Date().toISOString() + } + } + }); +} +``` + + +### Metadata best practices + +Do not include metadata in the body of an append request. Instead, use the `extras.headers` field to +keep metadata separate from the message content. This ensures that clients can easily process the +concatenated response without needing to parse out metadata. + + +```javascript +// ✓ GOOD: Metadata in headers +const response = await channel.publish({ + data: 'The response text', // Pure concatenated text + extras: { + headers: { + model: 'gpt-4', + } + } +}); + +// ✗ BAD: Mixing metadata with content +const response = await channel.publish({ + data: JSON.stringify({ // Don't do this + text: 'The response text', + model: 'gpt-4', + }) +}); +``` + + +By including metadata in the body of the message, the final concatenated response would contain all +the metadata from each append, making it difficult to extract the pure response text. + +For example, if you appended tokens with metadata in the body, the final message data would look +like this: + +```json +{ + "text": "Hello", + "model": "gpt-4", +}{ + "text": " world", + "model": "gpt-4", +}{ + "text": "!", + "model": "gpt-4", +} +``` + +If you use headers for metadata, and the body only contains the response text, the final message +data would be simply: + +```text +Hello world! 
+``` From 0e6811cdde1aa687549b48c2028c7335d555b287 Mon Sep 17 00:00:00 2001 From: zak Date: Thu, 11 Dec 2025 17:40:08 +0000 Subject: [PATCH 10/59] fix nav and typos --- src/data/nav/aitransport.ts | 14 +++++++------- .../token-streaming/message-per-response.mdx | 8 ++++---- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/data/nav/aitransport.ts b/src/data/nav/aitransport.ts index 4b892d74ac..97d5167ce4 100644 --- a/src/data/nav/aitransport.ts +++ b/src/data/nav/aitransport.ts @@ -16,14 +16,14 @@ export default { link: '/docs/ai-transport', index: true, }, + ], + }, + { + name: 'Token streaming', + pages: [ { - name: 'Token streaming', - pages: [ - { - name: 'Message per response', - link: '/docs/ai-transport/features/token-streaming/message-per-response', - }, - ], + name: 'Message per response', + link: '/docs/ai-transport/features/token-streaming/message-per-response', }, ], }, diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index 41a21ba555..4cfa42f7c2 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -29,7 +29,7 @@ This approach is ideal when: action to the Ably channel with an empty or the first token as content. 2. **Token streaming**: Append subsequent tokens to the original message by publishing those tokens with the `message.append` action. -3. **Live Delivery**: Clients subscribed to the channel receive each appended token in real-time, allowing +3. **Live Delivery**: Clients subscribed to the channel receive each appended token in realtime, allowing them to progressively render the response. 4. **Compacted history**: The channel history contains only one message per AI response, which includes all tokens appended to it concatenated together. @@ -58,12 +58,12 @@ Standard Ably message [size limits](/docs/platform/pricing/limits#message) apply ## Publishing tokens -You should publish tokens from a [Realtime](/docs/api/realtime-sdk) client, which maintains a +Publish tokens from a [Realtime](/docs/api/realtime-sdk) client, which maintains a persistent connection to the Ably service. This allows you to publish at very high message rates with the lowest possible latencies, while preserving guarantees around message delivery order. For more information, see [Realtime and REST](/docs/basics#realtime-and-rest). -[Channels](/docs/channels) are used to separate message traffic into different topics. +[Channels](/docs/channels) separate message traffic into different topics. For token streaming, each conversation or session typically has its own channel. Use the [`get()`](/docs/api/realtime-sdk/channels#get) method to create or retrieve a channel instance: @@ -140,7 +140,7 @@ Subscribers receive different message actions depending on when they join and ho messages. When subscribed to a channel, clients receive the initial message with the `message.create` action, -followed by each token as a `message.append` action in real-time. +followed by each token as a `message.append` action in realtime. ```javascript From ee684fa3ebba4efea43e7710af635e0fbc934157 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 20:33:11 +0000 Subject: [PATCH 11/59] ai-transport/token-streaming: unify nav Unifies the token streaming nav for token streaming after rebase. 
--- src/data/nav/aitransport.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/data/nav/aitransport.ts b/src/data/nav/aitransport.ts index 97d5167ce4..dd82007afa 100644 --- a/src/data/nav/aitransport.ts +++ b/src/data/nav/aitransport.ts @@ -25,11 +25,6 @@ export default { name: 'Message per response', link: '/docs/ai-transport/features/token-streaming/message-per-response', }, - ], - }, - { - name: 'Token streaming', - pages: [ { name: 'Message per token', link: '/docs/ai-transport/features/token-streaming/message-per-token', From ffb0064155d0059d313d49e0d76fabfea4abe06a Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 20:49:02 +0000 Subject: [PATCH 12/59] ai-transport/token-streaming: refine intro Refines the intro copy in message-per-response to have structural similarity with the message-per-token page. --- .../token-streaming/message-per-response.mdx | 51 +++++++------------ 1 file changed, 17 insertions(+), 34 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index 4cfa42f7c2..8d5cd2ffbb 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -3,43 +3,30 @@ title: Message per response meta_description: "Stream individual tokens from AI models into a single message over Ably." --- -Stream LLM and generative AI responses efficiently by appending individual tokens to a single -message on an Ably channel. This pattern creates one complete response message in channel history -while delivering tokens in realtime. +Token streaming with message-per-response is a pattern where every token generated by your model is appended to a single Ably message. Each complete AI response then appears as one message in the channel history while delivering live tokens in realtime. This uses [Ably Pub/Sub](/docs/basics) for realtime communication between agents and clients. -## Overview +This pattern is useful for chat-style applications where you want each complete AI response stored as a single message in history, making it easy to retrieve and display multi-response conversation history. Each agent response becomes a single message that grows as tokens are appended, allowing clients joining mid-stream to catch up efficiently without processing thousands of individual tokens. -The message-per-response pattern enables you to stream AI-generated content as individual tokens in -realtime, while maintaining a clean, compacted message history. Each AI response becomes a single -message that grows as tokens are appended, resulting in efficient storage and easy retrieval of -complete responses. +## How it works -### When to use this pattern +1. **Initial message**: When an agent response begins, publish an initial message with `message.create` action to the Ably channel with an empty or the first token as content. +2. **Token streaming**: Append subsequent tokens to the original message by publishing those tokens with the `message.append` action. +3. **Live delivery**: Clients subscribed to the channel receive each appended token in realtime, allowing them to progressively render the response. +4. **Compacted history**: The channel history contains only one message per agent response, which includes all tokens appended to it concatenated together. 
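+
+The snippet below is a compact sketch of that lifecycle, using illustrative token values and the `publish()`/`appendMessage()` calls covered later on this page:
+
+<Code>
+```javascript
+// Agent side: create one message for the response, then append each token to it
+const { serials: [serial] } = await channel.publish('response', { data: '' }); // message.create
+channel.appendMessage(serial, 'Hello'); // message.append
+channel.appendMessage(serial, ' world'); // message.append
+
+// Subscribers receive the create followed by each append in realtime;
+// channel history ends up with a single message whose data is 'Hello world'
+```
+</Code>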
-This approach is ideal when: +You do not need to mark the message or token stream as completed; the final message content will automatically include the full response constructed from all appended tokens. -- You want each complete AI response stored as a single message in history. -- You want clients joining mid-stream to catch up efficiently without processing thousands of - individual tokens. -- Your application displays progressive AI responses that build up over time. - -### How it works - -1. **Initial message**: When an AI response begins, publish an initial message with `message.create` - action to the Ably channel with an empty or the first token as content. -2. **Token streaming**: Append subsequent tokens to the original message by publishing those tokens - with the `message.append` action. -3. **Live Delivery**: Clients subscribed to the channel receive each appended token in realtime, allowing - them to progressively render the response. -4. **Compacted history**: The channel history contains only one message per AI response, - which includes all tokens appended to it concatenated together. + -You do not need to mark the message or token stream as completed; the final message will -automatically have the full response with all tokens appended to it. +## Enable appends -## Setup +Message append functionality requires the "Message annotations, updates, and deletes" [channel rule](/docs/channels#rules) enabled for your channel or [namespace](/docs/channels#namespaces). -Message append functionality requires the "Message annotations, updates, and deletes" [channel rule](/docs/channels#rules) enabled for your channel or [namespace](/docs/channels#namespaces). This rule automatically enables message persistence. + To enable the channel rule: @@ -50,11 +37,7 @@ To enable the channel rule: 5. Select the "Message annotations, updates, and deletes" rule from the list. 6. Click "Create channel rule". -The examples in this guide use the `ai:` namespace prefix, which assumes you have configured the rule for `ai:*`. - -### Message size limits - -Standard Ably message [size limits](/docs/platform/pricing/limits#message) apply to the complete concatenated message. The system validates size limits before accepting append operations. If appending a token would exceed the maximum message size, the append is rejected. +The examples on this page use the `ai:` namespace prefix, which assumes you have configured the rule for `ai:*`. ## Publishing tokens From e0c5cd8e77c3d5b85df09760b2a8a4e19d4d19cb Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 21:15:42 +0000 Subject: [PATCH 13/59] ai-transport: refine Publishing section Refine the Publishing section of the message-per-response docs. 
- Include anchor tags on title - Describe the `serial` identifier - Align with stream pattern used in message-per-token docs - Remove duplicate example --- .../token-streaming/message-per-response.mdx | 73 +++++++------------ 1 file changed, 26 insertions(+), 47 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index 8d5cd2ffbb..591e8e59bf 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -39,15 +39,11 @@ To enable the channel rule: The examples on this page use the `ai:` namespace prefix, which assumes you have configured the rule for `ai:*`. -## Publishing tokens +## Publishing tokens -Publish tokens from a [Realtime](/docs/api/realtime-sdk) client, which maintains a -persistent connection to the Ably service. This allows you to publish at very high message rates -with the lowest possible latencies, while preserving guarantees around message delivery order. -For more information, see [Realtime and REST](/docs/basics#realtime-and-rest). +Publish tokens from a [Realtime](/docs/api/realtime-sdk) client, which maintains a persistent connection to the Ably service. This allows you to publish at very high message rates with the lowest possible latencies, while preserving guarantees around message delivery order. For more information, see [Realtime and REST](/docs/basics#realtime-and-rest). -[Channels](/docs/channels) separate message traffic into different topics. -For token streaming, each conversation or session typically has its own channel. +[Channels](/docs/channels) separate message traffic into different topics. For token streaming, each conversation or session typically has its own channel. Use the [`get()`](/docs/api/realtime-sdk/channels#get) method to create or retrieve a channel instance: @@ -57,66 +53,49 @@ const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); ``` -To start streaming an AI response, publish the initial message. Then append each subsequent token -to that message as it arrives from the AI model: +To start streaming an AI response, publish the initial message. The message is identified by a server-assigned identifier called a [`serial`](/docs/messages#properties). Use the `serial` to append each subsequent token to the message as it arrives from the AI model: ```javascript -// Example: stream yields string tokens like 'Hello', ' world', '!' - // Publish initial message and capture the serial for appending tokens const { serials: [msgSerial] } = await channel.publish('response', { data: '' }); -for await (const token of stream) { +// Example: stream returns events like { type: 'token', text: 'Hello' } +for await (const event of stream) { // Append each token as it arrives - channel.appendMessage(msgSerial, token); + if (event.type === 'token') { + channel.appendMessage(msgSerial, event.text); + } } ``` -When publishing tokens, don't await the `channel.appendMessage()` call. Ably rolls up acknowledgments -and debounces them for efficiency, which means awaiting each append would unnecessarily slow down -your token stream. Messages are still published in the order that `appendMessage()` is called, so delivery -order is not affected. - -Append only supports concatenating data of the same type as the original message. 
For example, if -the initial message data is a string, all appended tokens must also be strings. If the initial -message data is binary, all appended tokens must be binary. - -This pattern allows publishing append operations for multiple concurrent model responses on the same -channel. As long as you append to the correct message serial, tokens from different responses will -not interfere with each other, and the final concatenated message for each response will contain only the tokens -from that response. - -### Complete publish example - -The following example shows how to stream an AI response, publishing the first token as the initial message and appending subsequent tokens: +When publishing tokens, don't await the `channel.appendMessage()` call. Ably rolls up acknowledgments and debounces them for efficiency, which means awaiting each append would unnecessarily slow down your token stream. Messages are still published in the order that `appendMessage()` is called, so delivery order is not affected. ```javascript -const realtime = new Ably.Realtime('{{API_KEY}}'); -const channel = realtime.channels.get('ai:responses'); - -async function streamAIResponse(prompt) { - // Example: stream yields string tokens like 'Hello', ' world', '!' - const stream = await getAIModelStream(prompt); - - let messageSerial; +// ✅ Do this - append without await for maximum throughput +for await (const event of stream) { + if (event.type === 'token') { + channel.appendMessage(msgSerial, event.text); + } +} - for await (const token of stream) { - if (!messageSerial) { - // First token: create the message and get serial - const response = await channel.publish('ai-response', token); - messageSerial = response.serials[0]; - } else { - // Subsequent tokens: append without awaiting - channel.appendMessage(messageSerial, token); - } +// ❌ Don't do this - awaiting each append reduces throughput +for await (const event of stream) { + if (event.type === 'token') { + await channel.appendMessage(msgSerial, event.text); } } ``` + + +This pattern allows publishing append operations for multiple concurrent model responses on the same channel. As long as you append to the correct message serial, tokens from different responses will not interfere with each other, and the final concatenated message for each response will contain only the tokens from that response. + ## Subscribing to token streams Subscribers receive different message actions depending on when they join and how they're retrieving From f324e1effee4d0fd150144ffb6bcd9797edfea3c Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 21:33:21 +0000 Subject: [PATCH 14/59] ai-transport: refine Subscribing section Refine the Subscribing section of the message-per-response docs. 
- Add anchor tag to heading - Describes each action upfront - Uses RANDOM_CHANNEL_NAME --- .../token-streaming/message-per-response.mdx | 30 ++++++++----------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index 591e8e59bf..986be27747 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -96,46 +96,42 @@ Append only supports concatenating data of the same type as the original message This pattern allows publishing append operations for multiple concurrent model responses on the same channel. As long as you append to the correct message serial, tokens from different responses will not interfere with each other, and the final concatenated message for each response will contain only the tokens from that response. -## Subscribing to token streams +## Subscribing to token streams -Subscribers receive different message actions depending on when they join and how they're retrieving -messages. +Subscribers receive different message actions depending on when they join and how they're retrieving messages. Each message has an `action` field that indicates how to process it, and a `serial` field that identifies which message the action relates to: -When subscribed to a channel, clients receive the initial message with the `message.create` action, -followed by each token as a `message.append` action in realtime. +- `message.create`: Indicates a new response has started (i.e. a new message was created). The message `data` contains the initial content (often empty or the first token). Store this as the beginning of a new response using `serial` as the identifier. +- `message.append`: Contains a single token fragment to append. The message `data` contains only the new token, not the full concatenated response. Append this token to the existing response identified by `serial`. +- `message.update`: Contains the complete response up to that point. The message `data` contains the full concatenated text so far. Replace the entire response content with this data for the message identified by `serial`. This action occurs when the channel needs to resynchronize the full message state, such as after a client [resumes](/docs/connect/states#resume) from a transient disconnection. 
 ```javascript
-const channel = realtime.channels.get('ai:responses');
+const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}');
 
 // Track responses by message serial
 const responses = new Map();
 
-await channel.subscribe((msg) => {
-  switch (msg.action) {
+// Subscribe to live messages (implicitly attaches the channel)
+await channel.subscribe((message) => {
+  switch (message.action) {
     case 'message.create':
       // New response started
-      responses.set(msg.serial, msg.data);
+      responses.set(message.serial, message.data);
       break;
     case 'message.append':
       // Append token to existing response
-      const current = responses.get(msg.serial) || '';
-      responses.set(msg.serial, current + msg.data);
+      const current = responses.get(message.serial) || '';
+      responses.set(message.serial, current + message.data);
       break;
     case 'message.update':
       // Replace entire response content
-      responses.set(msg.serial, msg.data);
+      responses.set(message.serial, message.data);
       break;
   }
 });
 ```
 
-Each `message.append` event contains only the new token fragment in `msg.data`, not the full
-concatenated response.
-
-Occasionally you may receive a `message.update` action, which indicates that the channel needs to stream the entire message data so far. For example, this can happen if the client [resumes](/docs/connect/states#resume) after a transient disconnection and the channel needs to resynchronize the full message state. In this case, `msg.data` contains the complete response up to that point. For `message.update` events, you should replace the entire response content.
-
 ## Client hydration
 
 Clients joining a channel or recovering from disconnection can efficiently catchup using rewind or

From 60fb7fb15d3f1ca2517a76f49b094f48db58348f Mon Sep 17 00:00:00 2001
From: Mike Christensen
Date: Tue, 16 Dec 2025 21:59:01 +0000
Subject: [PATCH 15/59] ai-transport: refine rewind section

Refine the rewind section of the message-per-response docs.

- Include description of allowed rewind parameters
- Tweak copy
---
 .../token-streaming/message-per-response.mdx | 40 +++++++++++--------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx
index 986be27747..3492381796 100644
--- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx
+++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx
@@ -134,20 +134,17 @@
 
 ## Client hydration
 
-Clients joining a channel or recovering from disconnection can efficiently catchup using rewind or
-history. For temporary disconnections, Ably's automatic [connection recovery](docs/connect/states#connection-state-recovery)
-ensures that clients receive all missed tokens in order.
+When clients connect or reconnect, such as after a page refresh, they often need to catch up on complete responses and individual tokens that were published while they were offline or before they joined.
 
-By using either rewind or history with `untilAttach`, clients can efficiently hydrate the existing
-response state without needing to process every individual token. Both rewind and history deliver
-concatenated responses as `message.update` events and seamlessly transition from historical
-responses to live `message.append` events.
+The message per response pattern enables efficient client state hydration without needing to process every individual token and supports seamlessly transitioning from historical responses to live tokens. -### Using rewind + -[Rewind](/docs/channels/options/rewind) attaches to a channel starting from a point in the past, delivering complete concatenated -messages as `message.update` events. +### Using rewind for recent history +The simplest approach is to use Ably's [rewind](/docs/channels/options/rewind) channel option to attach to the channel at some point in the recent past, and automatically receive all messages since that point. Historical messages are delivered as `message.update` events containing the complete concatenated response, which then seamlessly transition to live `message.append` events for any ongoing responses: ```javascript @@ -159,26 +156,37 @@ const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}', { // Track responses by message serial const responses = new Map(); -await channel.subscribe((msg) => { - switch (msg.action) { +// Subscribe to receive both recent historical and live messages, +// which are delivered in order to the subscription +await channel.subscribe((message) => { + switch (message.action) { case 'message.create': // New response started - responses.set(msg.serial, msg.data); + responses.set(message.serial, message.data); break; case 'message.append': // Append token to existing response - const current = responses.get(msg.serial) || ''; - responses.set(msg.serial, current + msg.data); + const current = responses.get(message.serial) || ''; + responses.set(message.serial, current + message.data); break; case 'message.update': // Replace entire response content - responses.set(msg.serial, msg.data); + responses.set(message.serial, message.data); break; } }); ``` +Rewind supports two formats: + +- **Time-based**: Use a time interval like `'30s'` or `'2m'` to retrieve messages from that time period +- **Count-based**: Use a number like `10` or `50` to retrieve the most recent N messages (maximum 100) + + + ### Using history with untilAttach The `untilAttach` option provides [continuous history](/docs/storage-history/history#continuous-history) From 917ec8e3fdad6ca453664a5ace57344e9409ff25 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 22:08:26 +0000 Subject: [PATCH 16/59] ai-transport/token-streaming: refine history Refines the history section for the message-per-response docs. - Adds anchor to heading - Uses RANDOM_CHANNEL_NAME - Use message serial in code snippet instead of ID - Tweaks copy --- .../token-streaming/message-per-response.mdx | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index 3492381796..2b68f9a3b3 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -187,29 +187,32 @@ Rewind supports two formats: At most 100 messages will be retrieved in a rewind request. If more messages exist within the specified interval, only the most recent 100 are sent. 
-### Using history with untilAttach +### Using history for older messages -The `untilAttach` option provides [continuous history](/docs/storage-history/history#continuous-history) -from the point of attachment backward: +Use [channel history](/docs/storage-history/history) with the [`untilAttach` option](/docs/storage-history/history#continuous-history) to paginate back through history to obtain historical responses, while preserving continuity with the delivery of live tokens: ```javascript -const channel = realtime.channels.get('ai:responses'); +const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); +// Track responses by message serial const responses = new Map(); // Subscribe to live messages (implicitly attaches the channel) -await channel.subscribe((msg) => { - switch (msg.action) { +await channel.subscribe((message) => { + switch (message.action) { case 'message.create': - responses.set(msg.id, msg.data); + // New response started + responses.set(message.serial, message.data); break; case 'message.append': - const current = responses.get(msg.id) || ''; - responses.set(msg.id, current + msg.data); + // Append token to existing response + const current = responses.get(message.serial) || ''; + responses.set(message.serial, current + message.data); break; case 'message.update': - responses.set(msg.id, msg.data); + // Replace entire response content + responses.set(message.serial, message.data); break; } }); @@ -222,7 +225,7 @@ while (page) { // Messages are newest-first for (const message of page.items) { // message.data contains the full concatenated text - responses.set(message.id, message.data); + responses.set(message.serial, message.data); } // Move to next page if available From 0b3d40b854b7ca6a587d80c6f3e957587b0f2191 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 22:52:17 +0000 Subject: [PATCH 17/59] ai-transport/token-streaming: in-progress rewind Fix the hydration of in progress responses via rewind by using the responseId in the extras to correlate messages with completed responses loaded from the database. --- .../token-streaming/message-per-response.mdx | 76 ++++++++++++++++--- 1 file changed, 67 insertions(+), 9 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index 2b68f9a3b3..29f3141b9b 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -234,46 +234,104 @@ while (page) { ``` -### Hydrating an in-progress response +### Hydrating an in-progress response -A common pattern is to persist completed responses in your database while using Ably for streaming in-progress responses. When clients reconnect, they load completed responses from your database first, then use Ably to catch up on any response that was still in progress. +A common pattern is to persist complete model responses in your database while using Ably for streaming in-progress responses. + +The client loads completed responses from your database, then uses Ably to catch up on any response that was still in progress. + +You can hydrate in-progress responses using either the [rewind](#rewind) or [history](#history) pattern. 
+ +#### Publishing with correlation metadata + +To correlate Ably messages with your database records, include the `responseId` in the message [extras](/docs/messages#properties) when publishing: + + +```javascript +// Publish initial message with responseId in extras +const { serials: [msgSerial] } = await channel.publish({ + name: 'response', + data: '', + extras: { + headers: { + responseId: 'resp_abc123' // Your database response ID + } + } +}); + +// Append tokens, including extras to preserve headers +for await (const event of stream) { + if (event.type === 'token') { + channel.appendMessage(msgSerial, event.text, { + extras: { + headers: { + responseId: 'resp_abc123' + } + } + }); + } +} +``` + + + #### Hydrate using rewind -Load completed responses from your database, then use rewind to catch up on any in-progress response, skipping messages for responses already loaded: +When hydrating, load completed responses from your database, then use rewind to catch up on any in-progress response. Check the `responseId` from message extras to skip responses already loaded from your database: ```javascript // Load completed responses from your database +// completedResponses is a Set of responseIds const completedResponses = await loadResponsesFromDatabase(); +// Use rewind to receive recent historical messages const channel = realtime.channels.get('ai:responses', { params: { rewind: '2m' } }); -await channel.subscribe((msg) => { - const responseId = msg.extras?.headers?.responseId; +// Track in-progress responses by responseId +const inProgressResponses = new Map(); + +await channel.subscribe((message) => { + const responseId = message.extras?.headers?.responseId; + + if (!responseId) { + console.warn('Message missing responseId'); + return; + } // Skip messages for responses already loaded from database if (completedResponses.has(responseId)) { return; } - switch (msg.action) { + switch (message.action) { case 'message.create': - displayNewResponse(msg.data, responseId); + // New response started + inProgressResponses.set(responseId, message.data); break; case 'message.append': - appendToResponse(msg.data, responseId); + // Append token to existing response + const current = inProgressResponses.get(responseId) || ''; + inProgressResponses.set(responseId, current + message.data); break; case 'message.update': - replaceResponse(msg.data, responseId); + // Replace entire response content + inProgressResponses.set(responseId, message.data); break; } }); ``` + + #### Hydrate using history Load completed responses from your database, then use history to catch up on any in-progress response: From 5abb8aaa8cde31bd94dc52e9fad741e717696490 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 22:54:15 +0000 Subject: [PATCH 18/59] ai-transport/token-streaming: in progress history Fix the hydration of in progress responses using history by obtaining the timestamp of the last completed response loaded from the database and paginating history forwards from that point. 
--- .../token-streaming/message-per-response.mdx | 65 ++++++++++++++----- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index 29f3141b9b..a8318bde1e 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -334,54 +334,83 @@ Alternatively, instead of including `responseId` in message extras, you could st #### Hydrate using history -Load completed responses from your database, then use history to catch up on any in-progress response: +Load completed responses from your database, then use [channel history](/docs/storage-history/history) with the [`untilAttach` option](/docs/storage-history/history#continuous-history) to catch up on any in-progress responses. Use the timestamp of the last completed response to start pagination from that point forward, ensuring continuity with live message delivery. ```javascript -// Load completed responses from your database +// Load completed responses from database (sorted by timestamp, oldest first) const completedResponses = await loadResponsesFromDatabase(); -const channel = realtime.channels.get('ai:responses'); +// Get the timestamp of the latest completed response +const latestTimestamp = completedResponses.latest().timestamp; + +const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); + +// Track in progress responses by ID +const inProgressResponses = new Map(); // Subscribe to live messages (implicitly attaches) -await channel.subscribe((msg) => { - const responseId = msg.extras?.headers?.responseId; +await channel.subscribe((message) => { + const responseId = message.extras?.headers?.responseId; + + if (!responseId) { + console.warn('Message missing responseId'); + return; + } // Skip messages for responses already loaded from database if (completedResponses.has(responseId)) { return; } - switch (msg.action) { + switch (message.action) { case 'message.create': - displayNewResponse(msg.data, responseId); + // New response started + inProgressResponses.set(responseId, message.data); break; case 'message.append': - appendToResponse(msg.data, responseId); + // Append token to existing response + const current = inProgressResponses.get(responseId) || ''; + inProgressResponses.set(responseId, current + message.data); break; case 'message.update': - replaceResponse(msg.data, responseId); + // Replace entire response content + inProgressResponses.set(responseId, message.data); break; } }); -// Fetch history for any in-progress response -const historyPage = await channel.history({ untilAttach: true }); +// Fetch history from the last completed response until attachment +let page = await channel.history({ + untilAttach: true, + start: latestTimestamp, + direction: 'forwards' +}); -for (const msg of historyPage.items) { - const responseId = msg.extras?.headers?.responseId; +// Paginate through all missed messages +while (page) { + for (const message of page.items) { + const responseId = message.extras?.headers?.responseId; - // Skip responses already loaded from database - if (completedResponses.has(responseId)) { - continue; + if (!responseId) { + console.warn('Message missing responseId'); + continue; + } + + // message.data contains the full concatenated text so far + inProgressResponses.set(responseId, message.data); } - // msg.data contains the full 
concatenated text so far - displayFullResponse(msg.data, responseId); + // Move to next page if available + page = page.hasNext() ? await page.next() : null; } ``` + + ## Headers and metadata Use the `extras.headers` field to attach metadata to your messages. Headers are useful for correlating Ably messages with external systems, such as your database IDs or AI model request identifiers. From 0ec70fcb9a4b33c48045790a1e85e0a67a8c8640 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 22:57:04 +0000 Subject: [PATCH 19/59] ai-transport/token-streaming: remove metadata Removes the headers/metadata section, as this covers the specific semantics of extras.headers handling with appends, which is better addressed by the (upcoming) message append pub/sub docs. Instead, a callout is used to describe header mixin semantics in the appropriate place insofar as it relates to the discussion at hand. --- .../token-streaming/message-per-response.mdx | 139 ------------------ 1 file changed, 139 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index a8318bde1e..b91bf42ae1 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -410,142 +410,3 @@ while (page) { - -## Headers and metadata - -Use the `extras.headers` field to attach metadata to your messages. Headers are useful for correlating Ably messages with external systems, such as your database IDs or AI model request identifiers. - -### Header superseding behavior - -When you include headers in an append operation, they completely replace all previous headers on the message. This "last write wins" behavior means you must include all headers you want to retain with each append that specifies headers. 
- - -```javascript -// Initial message with headers -const response = await channel.publish({ - name: 'ai-response', - data: 'Hello', - extras: { - headers: { - responseId: 'resp_123', - model: 'gpt-4' - } - } -}); - -// Append without headers - previous headers are retained -channel.appendMessage(response.serials[0], ' world'); -// Message headers: { responseId: 'resp_123', model: 'gpt-4' } - -// Append with headers - completely replaces previous headers -channel.appendMessage(response.serials[0], '!', { - extras: { - headers: { - responseId: 'resp_123', - model: 'gpt-4', - tokensUsed: '15' - } - } -}); -// Message headers: { responseId: 'resp_123', model: 'gpt-4', tokensUsed: '15' } -``` - - -A common pattern is to include static metadata in the initial message, then add completion metadata with the final append: - - -```javascript -async function streamWithMetadata(prompt) { - const stream = await getAIModelStream(prompt); - let messageSerial; - let tokenCount = 0; - - for await (const token of stream) { - tokenCount++; - if (!messageSerial) { - // First token: include static metadata - const response = await channel.publish({ - name: 'ai-response', - data: token, - extras: { - headers: { - responseId: prompt.responseId, - model: prompt.model - } - } - }); - messageSerial = response.serials[0]; - } else { - // Subsequent tokens: append without headers - channel.appendMessage(messageSerial, token); - } - } - - // Final append: include completion metadata - channel.appendMessage(messageSerial, '', { - extras: { - headers: { - responseId: prompt.responseId, - model: prompt.model, - tokensUsed: String(tokenCount), - completedAt: new Date().toISOString() - } - } - }); -} -``` - - -### Metadata best practices - -Do not include metadata in the body of an append request. Instead, use the `extras.headers` field to -keep metadata separate from the message content. This ensures that clients can easily process the -concatenated response without needing to parse out metadata. - - -```javascript -// ✓ GOOD: Metadata in headers -const response = await channel.publish({ - data: 'The response text', // Pure concatenated text - extras: { - headers: { - model: 'gpt-4', - } - } -}); - -// ✗ BAD: Mixing metadata with content -const response = await channel.publish({ - data: JSON.stringify({ // Don't do this - text: 'The response text', - model: 'gpt-4', - }) -}); -``` - - -By including metadata in the body of the message, the final concatenated response would contain all -the metadata from each append, making it difficult to extract the pure response text. - -For example, if you appended tokens with metadata in the body, the final message data would look -like this: - -```json -{ - "text": "Hello", - "model": "gpt-4", -}{ - "text": " world", - "model": "gpt-4", -}{ - "text": "!", - "model": "gpt-4", -} -``` - -If you use headers for metadata, and the body only contains the response text, the final message -data would be simply: - -```text -Hello world! -``` From 718d88046adbe208dd86488dd1ac36604bdd36cb Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 22:59:01 +0000 Subject: [PATCH 20/59] ai-transport/token-streaming: add resume callout Update the token streaming with message per token docs to include a callout describing resume behaviour in case of transient disconnection. 
--- .../features/token-streaming/message-per-token.mdx | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx index 7e0f48e794..21683449b3 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx @@ -235,6 +235,10 @@ await channel.subscribe('stop', (message) => { When clients connect or reconnect, such as after a page refresh, they often need to catch up on tokens that were published while they were offline or before they joined. Ably provides several approaches to hydrate client state depending on your application's requirements. + + From b6cd48273b704502df2d38b1337dbedb3c710019 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Tue, 16 Dec 2025 23:00:07 +0000 Subject: [PATCH 21/59] ai-transport/token-streaming: headers Fix the message per token docs headers to include anchors and align with naming in the message per response page. --- .../features/token-streaming/message-per-token.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx index 21683449b3..71e7fdaf5a 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx @@ -245,7 +245,7 @@ If you need to retrieve and process large amounts of historical data, consider u ### Using rewind for recent history -The simplest approach is to use Ably's [rewind](/docs/channels/options/rewind) channel option to automatically retrieve recent tokens when attaching to a channel: +The simplest approach is to use Ably's [rewind](/docs/channels/options/rewind) channel option to attach to the channel at some point in the recent past, and automatically receive all tokens since that point: ```javascript @@ -276,7 +276,7 @@ At most 100 messages will be retrieved in a rewind request. If more messages exi By default, rewind is limited to the last 2 minutes of messages. This is usually sufficient for scenarios where clients need only recent context, such as for continuous token streaming, or when the response stream from a given model request does not exceed 2 minutes. If you need more than 2 minutes of history, see [Using history for longer persistence](#history). -### Using history for longer persistence +### Using history for older messages For applications that need to retrieve tokens beyond the 2-minute rewind window, enable [persistence](/docs/storage-history/storage#all-message-persistence) on your channel. Use [channel history](/docs/storage-history/history) with the [`untilAttach` option](/docs/storage-history/history#continuous-history) to paginate back through history to obtain historical tokens, while preserving continuity with the delivery of live tokens: @@ -309,7 +309,7 @@ while (page) { ``` -### Hydrating an in-progress live response +### Hydrating an in-progress response A common pattern is to persist complete model responses in your database while using Ably for live token delivery of the in-progress response. 
From 01521db65aa2dad8caf172feb6020eb722ea004c Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Wed, 10 Dec 2025 23:42:31 +0000 Subject: [PATCH 22/59] ait: add sessions & identity docs Adds an overview page for a Sessions & Identity section which describes the channel-oriented session model and its benefits over the traditional connection-oriented model. Describes how identity relates to session management and how this works in the context of channel-oriented sessions. Shows how to use identified clients to assign a trusted identity to users and obtain this identity from the agent side. Shows how to use Ably capabilities to control which operations authenticated users can perform on which channels. Shows how to use authenticated user claims to associated a role or other attribute with a user. Updates the docs to describe how to handle authentication, capabilities, identity and roles/attributes for agents separately from end users. Describes how to use presence to mark users and agents as online/offline. Includes description of synthetic leaves in the event of abrupt disconnection. Describe how to subscribe to presence to see who is online, and take action when a user is offline across all devices. Add docs for resuming user and agent sessions, linking to hydration patterns for different token streaming approaches for user resumes and describing agent resume behaviour with message catch up. --- src/data/nav/aitransport.ts | 21 + .../ai-transport-before-and-after.png | Bin 0 -> 97361 bytes .../identifying-users-and-agents.mdx | 419 ++++++++++++++++++ .../features/sessions-identity/index.mdx | 63 +++ .../sessions-identity/online-status.mdx | 280 ++++++++++++ .../sessions-identity/resuming-sessions.mdx | 140 ++++++ 6 files changed, 923 insertions(+) create mode 100644 src/images/content/diagrams/ai-transport-before-and-after.png create mode 100644 src/pages/docs/ai-transport/features/sessions-identity/identifying-users-and-agents.mdx create mode 100644 src/pages/docs/ai-transport/features/sessions-identity/index.mdx create mode 100644 src/pages/docs/ai-transport/features/sessions-identity/online-status.mdx create mode 100644 src/pages/docs/ai-transport/features/sessions-identity/resuming-sessions.mdx diff --git a/src/data/nav/aitransport.ts b/src/data/nav/aitransport.ts index dd82007afa..bf8438cb4c 100644 --- a/src/data/nav/aitransport.ts +++ b/src/data/nav/aitransport.ts @@ -31,6 +31,27 @@ export default { }, ], }, + { + name: 'Sessions & Identity', + pages: [ + { + name: 'Overview', + link: '/docs/ai-transport/features/sessions-identity', + }, + { + name: 'Identifying users and agents', + link: '/docs/ai-transport/features/sessions-identity/identifying-users-and-agents', + }, + { + name: 'Online status', + link: '/docs/ai-transport/features/sessions-identity/online-status', + }, + { + name: 'Resuming sessions', + link: '/docs/ai-transport/features/sessions-identity/resuming-sessions', + }, + ], + }, ], api: [], } satisfies NavProduct; diff --git a/src/images/content/diagrams/ai-transport-before-and-after.png b/src/images/content/diagrams/ai-transport-before-and-after.png new file mode 100644 index 0000000000000000000000000000000000000000..d29ae4a6f1576546ffdecd48b1777792265c34aa GIT binary patch literal 97361 zcmeFZcRZW#|34aCbfBuW$Gg>{s8Kss7dnipy;tp3BVq-eYQI~vR$4SRwTTgJk=E8A zW>S=pAP9maoZF|LbAIQK^XEB_$2sTm&0kmUJNJEE*Xw$X=W8YQv4Jk@8NM?h5Qz1` z{d-S9AZA$*=y)H?Dd5VD=Q1=9Qv}b0d$&zOm{u5Po1VJB#17h7eF}mkLq2&t_;Ur8 z6H|6o;`Gp&eXT3c$51LEC(r$qd-upK$?(|25u+_X7M2fa3Vd-cLF_9i?x8}#t;cRx 
zphhNf{H7D*t^Qw6Es37i=_pB2=;0tUiJ05a&}4&4QY3~0Ew}&r5u8?ANQ;K3Mj|sP zD1&j>clgFPoQr;dd)^%GIAc84vw9`!8W24g0oL+&RNGz6A{^>Mh=P^x%+rX|szsdO zSjG=o)dj#_KT$+#dK$95{*{>Dygi% zytKL`3Ux08d&jh`J%;V%Ekz3MLC1Yfb}C_N{GgSw4E^-V*zbRzPYsI5i8BCqGJ|Vo zj3%3w!KRr09(_Y?E5nO#Bk-EOz};WA{=?}?uW5?|JuPO*w-v8_e(E>V2=QI>LMiQP z_k>*oDvN;Yr+GFBa%?2HxZ;Xy>s$}c?9CP9)|F=-u|uWT3I~&BYmlvL5kwB~sfEI* zX6WM;<8w){9o!Dl7`RxPrt?i@ryhRpZCLdpLAwMF;%-DgC%T+-cUuW9`y6)Z=IBGi z(gO&Ot|BIoF6?vT5U)Y&Qlk68)|B|}mp?#Qpn2SvZniP~oa1^DW$~1NbVdor*E^>@ zv#lq#_&s>6VZ*%xyZMo4s%5KzajI(0H*0^^fYkc5D^sjVGOg+=Y;w*WI0!HM$@KfI zli~Hb3{&}MX5{uzqSbWh^1JmO14e-shL|QJwNLL2cX>?sNIVJ6eflBPaM=zAlA_70NP%VvmD{?oz3hi3NnB5Wk&;MAj8;Rgt(t!={fPL{y1_a_P)%NZj^id zBy;^_Y8aQ$B=2Jrx5W*{B43Qc>z%v;gE#^LJOVp5&u!g_>stS!LbONzI5?Ke904RD1=kBy{GLu=lIPtn`| z`hQG)cOaW>7k4^zP_#w0b{|zmjn=9aR7*>(TCEXn)!rj^LMf_>4r+^0d!+W3qOF=S zf)b==LPQV*5&7;u@B4n=cl~!KBzLa+T<4tMIlprbplR?8jBnjiuT64tg*h5NVS8@liZMcC@-9srozNMB|4?p{SilWt#ZaJsOhCN zlA-IC?#FQsRyM^GBBly-CgVRkoC#nk*Jig z1G;Y6xop(sJjBx|gW-*8Fi87%3zhdUyhWMRq5z zgE&7dk2LKhPt4qS#1!O`L^uO(96j^vMN0mN@9I8R-gWK#!P*3)7JI80v=~4sRz|@z zgZa3CfIHiX?-&TPD3gLjN3hdF6R6gIV zwn~{M@w}3wzO0J0-f^>Sa*3z43*$Kfg1AlO@jJ;^IO997aGw+!2FVVFFW!)eHxDJ# zgSnp_c-_yxGqy+4t2#im1>kV_YzyJnrf6S65hdWb&S3}#JEj(waP+?L!H+#JmqH6S*AFkcNXa^deY zFZW3{b!l+Ze;vo98e@iUw(k@Cs>RAG!VL)qe?QbeF9pX1r>tZy#1CNWDY zB2_<&^OkM+c%AMKNh8V9=Et+7S}{(mPJv*=K`BLfw040wJ>k=m4@>1 z3j78?w;a!~plBx+QE{$L07>e>L+kw3TqM-cNpYf$JD#{r!OFvpB!=oN-2l6{(s7EI zta>EYx<(i-00KV8fkM3-x7AY;?)JYJED&BDBk9>fUwRb@S+Hs{KQ{Y^*kRTeXW40{ zMvyOh&l7VQ2q1%|M&HYxZW{S`P^#?{ZxP2LetR>Xa#~$4iO*Ch&)A0f0q617jB|2w zJUubj(k)C1B)d2xAeUcDy2wy9i!uV-yF6!{x~UOAXbn}G zc+($W!_saxS}5e(UFO+H@wMT-mW}_XWue}x+}6z5!1Mi?TOnBWFmf@=6+zmdu?pER z_;(beKqo`{p?KpV3Z=T<-urZ+x#>do55Az46-wHQY{<Li%-;pV z^;d0qy%|!Zm2}jYw-7EE)bxB|`L~jcFU%%yqTswGZ9q`3t&t>slCEY;qc+Sa94r*i zT68A}Hb_L0MmLcGNQnR37wId&;o%3oJkB(PUU#Af2JLd_+XuqT2uExw0+H_A4?4-iQ^SV!iP07w^i?~wqO}3S+ zoIZ#Z4E>|C9{Ski>R#`4G*Yp)%kUQ1Fyb<+H&0=?KL}JLdw~t%x>>?GDb#!qSqUj#pBQLT?qD! 
zQLmvLIxQ@Q$Y*%l@(?|qGm|+%t2MrJU(|IIoK#<;Pama02hri~urX5YGIw!&gmP(*|=+U?i{BJ=~(`HkIC(HpB6V>}CkYk3;9ntxS z3GSG!G(-}1Z-IOohq<3mIM`|3CAUo9&|-Sso>a6hJl%bQRCB?R zy^h||4!`~cLc=jw^E%v2xA%%rpOnL#)K`>h>9&bU*<)KrPn7?`oV|7{Wd)^R)n38r zfwz;rj1*&>-w46@n@!Y`d&FP$|$3OVy75LOft@p1t%da0V z$9%rdCLLdN`D$VB&(f-gXc*c?ueZ>zT)Jv}gh>UMW)KRY*}PaZw%+ezqBs-`WR%Eg z#&0s8kt=c_zZ5fX+9b-7K6Jw~m?gT7bvyTbOgU>-xyh&i%)qHbiFQ?`y0!4ppN*7@ zQRmakBzZo5L7z^2VpwaP7xqpaDb^6A1{EmrT}f}MnQm%jzeS&3O_$085(Nrwzs@(5 zHw4(Wsc=sOAwA}H#A-1Ix=@V$b!HyX*>Cti@~jxTmM=AOm;z<&-W46Rw<)v=uilMI!{2R#qHgY^FZDON+&=gdoNQ{tNm; z*)tDPv;O+tpA?HG=Xh(=CyGAnCFo>uH%|#cOM_pu)|#?#Z~jisdqZsQPJy5;&osH~ zRb_gz*(08r>8z9+BUJZIN;xmp1W<-sWf2 zN|D(48QXl`A~58+dd0(LPRk=d(t<}2iXQcan#i89kU#=~gkFI-ZrCYnGIl%}W2K+k znibtbXR{7z9M5)Mmd7O(Zu&h|Uyrl9J*a0qSUnJx`_XE(B6vOZf`?$S=qgu={`&ju z8Sq!rt{9IPHhwl9t!1lR=cI+K0LcK#%8gdPtIuQQ5^iXmi%A7igB5=$XJe0B4=9xi ztb^}V2&nWbznw*(n8C<7bS2MWC-;b@HY?19b7N`R?*jP6p3H=u(kP!)+H#v_L;^cn z8lCv;PtvuM7rjSLPB%ROQlxlye z2%>sb67ab>GMM#);hT8tN?|P^?J_BxQe_dH{tcifpld8UAVJOk^=k}wy#aa|$sk2|b z)qi9*RO(jx2+lGu#~1TMZEae81v1iMf0bZqDi`381V)Hkl|3^?*hR%;gUg1|!Fk`e zCOw`Gs7%uOAs+lZn#JgfN|DerrT)h@yWdm6#R0b6r%A^Ewcf(e%l}mH0;h*%<7dh1 z&K>=@WM*rS5PZ%y5}gKpI#n(HA>{eu#o=v2NCl~OMGw1rCy4~TpBQ%Z`||QS zZJr=>p3Y9a44u94MOV4-IOshG5RZFDVwZY^NzILW>xjUuw?{{^s3&UqZ3W0H5a2dz z_wdM-bC{gG3GOTwEx%V^WFF9Rww@SL=~Z^!75T*-TEc7=m6onVMYq~1Nh_Pgb?ZJu z`}`5p_SWR2)llWR^P_0dFZ9R77fOhN*rYRWB0&}%#o7@6mw{qrQQm*(jux!1hBtgj z!Wq*v6V|kaPuv*srv=d`XE0?!TYzG;Lh8yz;iEKtry7*O?=C}rzvpZ6RoncO+e({i z!ipagavZ=%=?$Ssw(&`!$xdHy?3@OB+Zy)CSpJG(LA+jWc6(UKO7g}o2qK9eA`#0+ znM?QZ)c}Q|8Jip36?@?`G{nsl)+WJG=IfIcIoP~LBKdWp~Sr}6tq##;e@ zh?j};llaO*C&PT0CC=$D{#Oy0o{)Z`E#hFv`|)y?QlQ`cB6i8HL%eJ-Y@Jmf(pQ9r zrtMSKCO8Cs6utKt^`sU=<)q`xi5nBVJ2Z74Tduu*8QFe|uzk6$w0PpR5~#t$!JQJ< z3%I$*S#r>J{w(#8fDw!WirO*zt2NDifofGam^mdCE5L|$xZ}USLzu1`I&9i-W)he@ z%ZcT{sjRlwI}E;v|A}keO5It-4oETLvTaszCqRFzt$R~D-vx6!wF;i}LA1ni+6dj_ zU6$_^nX)rs-QziSNij^{u`uouu4QYwN;uty&;ND0QcZ-{=DcZm0Dfdi0owbcG70nL zEZ(Xe^B`~NaYxqO7N1l@QDww?>FxrO`38>^Dw&0JB3^Z@Ig~vxneteA|M3mpG8XEF zqec7`GE7EpXA(vaXqmZRA$5p4NY)z-K2K}1%Z5D<@iJEX+863S9OP76waoPq313Qd z?;FoL1$w~8ksnVN=eVNy@xjz%%cH>4{(p%5oVu%rGl(wNmxY6RJH8e9WXQYk_x6=Hc1u3&8itZn2ae$OgI5bX z^`a05F5^y&lJ*$01fVW=h!4ofEOUO_7f50j>4pX0DEe@pSxu@fu$ATRYzS(WzNW=x zQ(UZhukFhic8Ia&tYl+N_+rJa#{4vo+ukE&7KfM8u6<_<;TE?<*h2dD&foG-n-Und zJFIR#Bv0ew`t{u8*gxwl1DbZ-<^iL0 zlOpTI-qn|)W_-kY4@VsFHW4P^!Q zytFvp5Zqt;xQ|ehxIWzw%W9U89f__QDibbuK?0teP%KbxX!xn~a^%qI#SOeU)o0i?!XinF{8N zaFL3WkZzHIB6t{PceS}?$RZ)I;O53OMZCoNKO(JC6{40S<0&J`13SIbS-Y)pwIg}r zerM-j6y*Gse|71Jn#1cB0$gd`BhyxNcMv&#Mv{i&y3Bern0M5wKCB{{a7LyTTX6bZ5b& z)z-VgXb0Nb!KwDfrheZK40-UXuY%+Fk~3=-oXs%o-I+LeG*@jqUbmi2pLULh2DC~{i_(RzrTjdE`*2kI zGIM-F`gpZP=>FY_NR^wn9z#QFs!f|hXX(>gEbT{Q81OJ9G~}qR#C`a%Yj^@_FU1NO z-5cUl-d~fT&-l41XW#S<=^gwH`zL+$7elSfZ*2^IRdtOqfQHH7V^?@XFyI_t5w3&neHl)3!&4fAkXXgH?E1=6uX`eSn&F~HhkXb`qU;5W`vp?_`+VO1Sn+bg{x)Cp?08+E zADcd7pPDb1f9R-C>H7lH(xW6Iow=>!+~>lJd1}%voP{bIO&=616E<;tvuk!$TsSH* z%-eT{CPQrn#BHB=d%)~ml1fzKN7px3xFH8nkOtphFQDXt(DeE90UxsmhVx^+u36SE zUJ1B6lSB}iLiO6XyP%PpFU!RAnxU&xlY(vIr<`dtP)zf`8_i8r`xWu-fKFHM3e~{%%gsr1>ps-c+v9Swo zdK*DI`CC)U_g@PjeJ!#aMF(luJB1;YAi#gn7+6O57qy5 zW50Cj{_}n;D7Uotd%X1+RT-hiV=_L5)FXQ1QzT!POhQs{WKz&!+}lA~nFe)9=-F8N z2(<4`LI~m7?Wj~^VEmb7B#4918Or|MiQSFqA0B7U^@5V0r)^RRuWfx@qWdgY@6I_U zy7_Q>;S!m^b}UcAV{{rRUhYNOVkEcEpTT%1tP)rMhNR z&ojy1>IT6F(e$USLdYPLa9NUlRWCZYO;|mqB=e(GlB4!rVR;^c>=kC1{=jD;RC^Wak`lw8rrab{m=dkMOku@3i1ws_5_OcNRdK6QMQMwg)R7O0VCz;dELCY)aP<)f(JB5A7}aH2{gM*XoY;5__y3R>bpSzTS{kyZsd=1=)vbnM%R*-k;ta zR9C2+HgEj!gL;#iuZI!J-wz8#d$;{$>@w()mn)yT{#)&_t4Nw=dI2+9x25 
zeDUV!)^qPLM-n%GSRIlhjAGKZXd9O_=M(Rpn|;_G0GmEyLw7CWpPT}H8TGbUT9lCR zb^S#tAjrmRkDyqXE&SMW!vx1x?#J(O8z^J5tX?zRN2!(*&bRPuY4v^T*QGdzDdKmA zZrL)f(_C1+@i&_dlbPghi{DE0MVC{~9xu3usEJ~!1iJVt<3^gk&QcC;#?$7F$`bt6 zcKj;2>6x|L`+OhfGI54t9050nGMCc9OOF%s#e<;hb>)N3UHF()Q1SWO-jKZyA!|{Ux;-fozSzXIQE7QOmDwBR+t?dgTJC2o z8C4IkuhfrLrtZ6QOeTbVT-i%`GEw5Je*3IN-C1PKB%B%2fS`Od;31UfU0v2=1iGYu z!kY^iS|w~;)TjhpJE?Ezs`CXL_{J?*JJsn4{gvVL#OUqq&TWdbLwb$0fqeoi=Wvef zVW2y)BlYV^!ntYd$ey90#BUVJs0-$KAbiUD{)uZ97SA)lhL(PRvK8i1lPs0&v&&3bJTX1@(T4=inW*Oh7vwfIP@Ml{`c0H(?U8)6YtGhC# zRr?sR5YL0>K}O4EAWTgU+~P=X^&aZz>fOI_VL5!CCtlq0E!bd>@1KnCn@1m8>m{&u z+pV&c8|;-~+>Vb8`0Q`u(8$FDrJmiVpWICH2sLAmOW+miDI`y4m1`69_s&g^y6fDq zsVkR@=w)mn-dVeO)oqp533c9CJZb+Nvw3>wJ3w#eq=f*}F!zPNoooil-_;4RcM*8| zh;>qOK=Q*t(d$@^mIq1v`qr&2GB?k}br$GiE5#N(^RA|K-ef9Sy{aa6hFOPAkf^Na z#H;x7%oVG?(=Oc!_Un9eUA?^z5_j$-2fbT+W3@}DTFa7q+~ItGK8~ElsANZcqMTg$ zOt|+E9xC z+>1}^rfJtS1^GpM6*ZA|h|*z}R@YB(b7c`s6$^YT!OB}8L#%swmOuz?oC}ihg&#pC z)lmnbu2@dway1NmjbiVZZ&?%XOq?ht|7r9kmG^Cl`xbrO!enX)w)g~1C8Aq5!bX=j zd*t$_J*nX@VW<>P#m_3ywaSX&xjfc4cS#J<% zNs{NqMaXfd3XfADrHukMd1leUj2@+-seIKO#Hds+I`Tt_8=FxA=i9bMsN;FJwoLJX zD@$Zg*E=H2ou)hwJDyJCA03;lL8I2R+1^Uqlz!A79*#)6+qH(qZQbur(|nS?az|*6 zWD;HoBj69;x(p~p?=R@vY^8D+Je%~4tADCl;PHIUC#ZeV&4m_0N?;MeP3=HdtJYn& zp3uyrNgI6@4O@G~up&}IrR~)#l`O_h+V!T4rVc{tS(YoNqo;^AP_Y z1ofZRlS3{*R?+7%fQYijQN8l_%3gujegG(MCd|@D&LCtc)OSwiE{+%^qD+1*n3g2A z2GUKNMST*be_%n4yYIP+pQCKoAj)nZ1etg>NNahe#Ah4kA!M1HgQb#HDne*<%x;fW z%SdF!fzTTYqUSE9KzE_uaLKI>hNQ2X2Kvu^|4uYk!ftF$1)$r%&P=@kl2bDiPT$rd zA@Pqw_mdf3RA*cFvz<-@TCK3|urC$eG3mqS4^l()8R?f^xU|@^LGn9iSw3~QeCSaM zlIykyAoCJe1TnSz_ScHs3&~R+BqV{>6Yyj+-v)YaJOoYWzk9V15;@Cpz^i6Lt3C!& zM(Ra|m6=@vQa0aI$ZrJ{;0jDa$7%(1&V!UPRA@dSYq0}8rw+mbeA~oo?*y;;PJBJu zQL=a%;_PypwldXili$Yk0~YjMj1hFVrn-*%)bn@RZ^Z+zeB^5?7>Op`Qs#7Gw+IcY zrj-uqT`kmBUZoTY^`w}ZJ8`nga7~LfjyV26QBiGwrY<22uciPjoaaYX^XiTV(>9w+ z--}x|JA8qBNzShGSIH>__25x=G%bpJCkLO11t$-UZR)7gVAqwUT4|bXEY$Hz?BZ^g zy2lNSN_sA*ztpz11;jsBjpKE)H#TpF1`~1VH+x?yWYn$C^cimDH+|^hbIl>Ocl(Gj zv96Ex7%m?0iZ!Pn{<1SH;{QYsxkqfj{ads`a)oCG-+owOi-I^pZ}XWMl}+pug#yi< zl#oi+#w6SkwRjXeC_DjwlywYeB^BW)1fRA+c~o`wN}DS2ZXp*ZZg81d&4b=JbG2#pkE;$}md>dINA76i$2Rj&9h z^p&qFby5W1c9w47tH67zt4cd*qdq)Qm$Oq8M)5Y-+h8RiTryqnLgTCzSKaA8#^i6Q zpv-L54yH5+M2?+VR~KN_f8i?>BMtAug44l4Q=Rao`uOjOWq z(w+|vH==a*)N0;nn8@U5U*Zs#J`0g~U{gCu*SO=n|C<&?@V%j22xnT8AGp4m+7* znN?p6u07z@*5p1u!3}zQA77^IIo4D<__U=|v_0ToGEA0yWd;z=^8{KGYd8}KM9OEG z(*U>azbpKs=3fT!e^<^+fPwYD3($=#YCs9u?*c|ptIfuW(~0oG^)lOiJEF~G-+%rl zyb_qKtnrXnDYQ7v12ohV&lq{~w*xY;G|v8I<^6Ywy#DV5-oMuxJ%FM6-#Z|q+yCi> z_@5V&&H?H6|GPv0O!R**GWLeMGZCgG zA&Jby$?kGTGBeS)`6+I+g$c)NWCSR}|E%x`r9+kpDc7p~7VBffm4ZBf8CqgFl74gN97EF)m_XP z4(F}mQ%+`%roaCT&zzNbWuT2%@lX>l*C|jIP0dm$Z()%i^PZa_iI%&Z*SyXgsq}C2 zM-XVM_NYo9;L%|#JSGHO-RI5mcUXEW8MTH1PO<`kRb7m*<|Ld`6>pP3hI z+*O{j)|O)UJD0A1s+Q?5MG;sU+1a57O5EUCfbtU*^WIeejxn!HmXrk@4 zY16uhqNIM`mJ8O$Z88&%IhqF4z!=^m{Ji1gz>uGRKjoLnKxW!HF;|Ni>P-;pV?Yzh46^ z))nXMmQfu);ntJq^}3^93Aap=ZPlFPMbG_W0lp_GwJn6`zMIMG4p8jIc?`ijnIWy7 ze18wq5ZveNt+Ud{{ImtvJ!&Cz@p4>RQasapKmsIyxf0B@H~TUlME|lmZq-@U16h<) zR;cIsl2)4eOgZi%X-umCf^Y$u34e!@l{W5@8 zm^A=wzK{LYV?q`u(3M}cPN$#y#r1b5azgJu0^SFx2!8jp`YQz>M@7h;TIaCgzMli@ z&LHK}$>FMpKsV9wL*QXO*HL%Rdw0`kybA;p>52J}u3iMl9Ji@%<6PthzQR0|eOxat?KAtlZxs0JCa^QW1P zXt8Td6gK&Wz7b;sU`pCF%zZIZJX8Iu1&T5$*;zBz)fi4vq$v?-2GW z;{@g1El%o*_lOfMycLWzE`>ig69fx#@{wq(&X$uPU2D0_wbeNh;Ld==`W=Lzd$mCF zDNxX8)dD5|)Z*)l)gYr&c}p+zsT{z3d0>SPmRFVZ7|3PUUT4^v%(1YTo0_H`5h23^ z%}ls}vS)|vAU{ssU&l4BoK5*e>YXdt)tXrkn5G8P>7@repd3}BzueuN){>Kpu{jTe z{%q{BQ#qdqsP8wyW&=CLC0eYg9y8@S)PGdTJ%8l0Rh>l*Q(5FfsKtJ{*Td`g7bCTa 
z!(|Z{F(P>wy1zgw=U7dshn3Zm0D{z zse|ox`yzINU5p{0$oG@s{{I`}4m_gs;8s<7E!ZQ;PHD*h;)~-NwEyW9fZO&4ouHUx z7P#UZayoMy%L7(_0SfQ@Uo&M8h;wF`qZ{BM*-u8LK+pm+6`r_k|LsNZ6Ch4=XKv2Z z|K5JqU6{w ztcUFGlbnIjZ4+*Kr?RbFw6zCbVZUQEH!iV$_HW%S zgl!LZ)Y48N;FdM`aer+Dtt%1iEHK3M+}uy8X}<5%X#M7c^ZjCt@g567b~+i+a1t!_(tY%xIyl->7%B_yv>2*%_;Q&^39CsaSEU0oX$g6bl;~M z7NVZ}@ye1Aji~2?G|^?x%MD>$`@F04g$84FL2&TRGwQCS*$@wT(7uw-eCiI_%K67v zM|8`sEhF@U`|M%F0D_@U>uKwA`}3_6s!G5iM|jxy8^CJJ)L`(zq6xJw@u*_WCT)`n znfB3%uuJlYJeB=~?utQks%pV-T!OC36Id#8F zHfP#tyO4*Le^qWMjsN$IlW4^CrB_{8^WB}H*e?H{D`w{BxFL6bKqjk3uN$PF4~31j zQ`=BgY&c%sGEDLPS(IoK5)?@BLdep?iQ3|1m4dX4&E;P zD*IEW<+<3u&eq1i11mtF`vt?HuTu!;?Se(&@``Gd!EOY#%_^^Vz- z1Z{}NthH;@cuTd;Mzg__s~A!cIw~nL!re?Xw)v0z_ba@w-!b2WwyY(JVh3?;_8AOl zNE{IrI1!Z)iZ~0O|C5L4@fs6C#+@r zqEa|S4ZI3os_!r6aD9ZJH$O$4mEpSATPIujfTpB-4F5y(Pcb(0*;9I^*xI}1V9UJB zdTpC%)L^Q@4^R##;6;WTI4*cX{6tE?=LTGTcQ>xqZ~Sa>?sN9wmgFtm4>(#~Mi%Xy zng~9^pGcGo$>wbb*G#I)l!fi4wlOqeQ?T?C*Ol4(Wzn7uVL{+Foy_3%)uyg(Wr5(A zCUMx}CX02}`diAj{pDpvjY0060+tOu1Ckv6J@*$P?28I#Qk)rwqyUdcEz$@&B@$8Y zTp#ZZ4D7X4vqQFihWiLm6{j!UtN}k!X#V^9Y4!cI`!)e2E(P!DAZKdb(qDnDm8y_! z&V=asNg)VF!(_&tJ?uS3^k0e7werh02dx+=IG=P7l!)$TptY$x)qSY7cJW^T*CtFL ztkWZvp)Zjnpw?`a&tG#lQ1Z-8F7485<=R>v*iFmxMvzIjrhE^-<&brnjiy)0aFU%b zpW0J3moL2TB~Xc0j2)K?oA{sU9MI4uz&a>%F_{4vc_11g|9!9m4DD2Su{Y$W;h}ea)(Fprktol@9Orn5$z1l$it*wC1 zji}u3?hBKStnsjQ0&YmWeBYN{@FN!IWps#>F$iQ3Jj=;k{{TbXLE%0H2F|PfP&7{J zom$FCW#KmRL?xCmOMRWH(fXEIB7Fee_D9<$fhXBXc)!Ng0c*?H;jet>cJ)NR$NGKw zr&eMQFr;er_942e!f%hLB%Y^F)x_JfbcAK|^JS$C!BX7J_qjfo`jtoY^)Ujgk7<+? z-wE9g#3q)KiVZ*L{tH)}!gLZ~9{jOHmYhb@rwaunm4B5{PMMJizgbDs^Vvtr!g(gj zUt5L^PU`obh-rZPS4V$hRS0~h?&_I=D65k|W;%!Mrzm3E7`{(ek5;P|tj|uK5Nvmh zs%7M(%frz1ZBhez`(>htDyvJOGx6=FUO|j>sMOGQz6_Vw(F`zsAz#w*QATaPDSFthV+~30XzrMW?(Wu0=gGT- z@d{FIzbh(a6xJ3tApC(wOi%tWR@`IKvG_*l`ynRkgW%eXV^<7oo%PoApa-bU` z->3fyHQ;`|+7lCF(<%H4GO`ZYZ6zM>HO?PAd$+g58P&2p^>m#`B|{tv?g>knN%`-6 zz3Kti4I4!tWJUD2fsYn*j%he{oOuMYckdo-a2wh4WhIDX&TKi%gtU_X#k0TWRl;B~ zEVme&f)?_$ENz?B!f*;LfHBk_FoAm_W)vtkG1z#!VzEqGxP=k`brzP84_m^`G)%$5 z774ckOnzVChwUd41!}-;5+>@2wE3n1ZQtV>CC^V@53k-Csn6OSl~0_E5gcwgXVWc# zY+IV8{x#LW?<(+^2O@%Nb4|%#Pbhz?LG!$bJk2Tt`cX8r)hppJxsa&-oPrM<^*H0< zePnUo&Bbq-iehMSzxW)%zB}IH1!x)jo#GE3?R_%zQ1kQV*1S0q0re+4J2v4ZAR56T?Z{y!mn7KUdx$Ffo!rU17= z5pV$iC#`_1TCeo~Nu}GPuAWo8s$UUYy`l)dS_WlLTQLt{?!wIj_fapjZKK834ttYp zB9P>h&zB!4ic_L_+nrMkGh8Y*=R{e}$J&s`-cbLabX7tB<3!&D+(vt18*T#orY8FG zQ1TvR`kg9JrvrFn_JH(5Uo9;U#3HlrK1hBYKNxaFC)a+aGRuQ=S<98tGhRuLYGLKO z^Q{7(?&>gU-(ewn8gxVIzevq!`Wh3!Wfw_xeVG^(y?>ksa2ca=G+ECbQ0tnc}e1AY!_upD6(h0JM)=vB;v=BK(PwVS z_NXBva>%D%Kl4uevyMD%_k#}^!3lUp)H7E>-FN*M3IQmH`vEv>Ic_#VlP_r-cD+t{ z#5Cv*XUogm6k9+Aj!=(I@AwPdaYV?iTf0ie-#cd_^hD~_M6?h#o?<5EX_3w zZd^9q5QEp|!;xe`>m56y6MS*$Yp&zv^blvPfr{taFAV2-5HtQ$B%<-#R=RpdmPy+0 z@sS6EuTD6&?8_;tQzK~1pc?|f=G(iz;N{T__fZH5rrG2G=Z$kjS}^RVczKEYv1L8% zv_+IX_?wB+hXp;)U2=Idcc_h zlgF=XLyiP3!2bDdTfJpcjizZ+yb*XN5GVV8 zeTo|o#Rbc5mskHeAWlnjoix-1Et5x~a?i~#-Bg>hlPQ_3fY3RXFTL@J&+irf}xzDxsi-%9Qtsj`K>?({mnXHSSxr{BT~= zN^?qF^Kg=djfX3|9lWr+KY?@~9Uz||Nw`}~5zp+l?XDzP%>HUN%%|BR?S6lim6c)v zZKZ?-)d2F)Az(>kQFJWmZCSm}EQ?1ov}d0e-t7z9i#Z+f$H#7iu2Mp?xufh}8e*`F zIvG9>;GChQ67Mdc*s;uHcahB`Ka$)euObFXU3lta+PrxHrSn+fcnwfDSh9aThLwMQ@Dp@YX$96t*3A-* zLFdz(YJnQc9p^rp@kdBwQaMDY(AS!7(B}{gi{qB`YLH3RCLWvQeS%>-+*fXyOU%5@rI<_roAl(?R0_x%#z z-oo!PV=hf^$P+EbCT7C^Hc!aEszz`18ROy3j!KzmDjr`LYzV9FjdFT!iF;R8$kb8X+u9u}Xx!FK-jrG-4V}Gmbwb@ld!wY}H;gMlG2o!LW_owhFl6Bhs|lMa z_5)ZTVm64n|y|6~Q&rNCY+aO_gT4n$QP z7k~W*Q9cQMJBO6uKABoU8Ehd>^-sp;?CYo>K0mMA!1v{ZQza1pOA7{I@cQ_rcJ5lw za~o={D#G&jUiW#bHLiBA7e;f+-z(Pa+<~+4L@1~8xU5@MVJ=C(+_ujgQxE9};O1vQ 
z&m=Xs!^A7iQ(I$5h} zW6R_9OD};^{HBO&SM4Ex!_3;}QM>I>_{ltWYb!mj=W@!U2m~>BX|SNreRrC7rYShk zwCKhY(^QGMjS-|CO`o>fQ-pBcwFaA-g|DdG5*JDc;fH$+mZUtYcMA=|Zh$XFWgqZ1 z2FXT}p#!-S7aJHmWAw~vhB&u%!e20cial@onvomNtr8($TAy^Y(PN-cxau8+-V#RW z)QcaPKJszziAvdu6MCQY>cquvj`+OL-1>1#y*Y=yXV;9hGD-=#LrGo06N}em{_QZP6fpSnw{h{2Jf}~_ZDTE+#!Zm~UC2JlOphjqquZ`R z&d$;t0Ss_GUS$F}0lJf&ym(93`+eQPJn!FtiUbbAc>MZ1^Eu>L}Z4`^3N2>Z5LZzYu~*KHP0<4V13d~_2rd1^Sh`P z^}t!6a8XXe(TZ)o$v5{H;SNvoKI&-mSj*Pir@IqjJBqvgHN2y6WOWUWal~#SEUgEH z>j|2}gH}yLz7}LC zX`;B0y`~~~+HUGjqPwo#%h$_c5Bq~^gesojUrIVWlyiA-HpOsk%jL!fmNRRj8|}W3 zVgvMbsX|&{$tkfTFrb5qcl?QA9PM_?ock71g~!hz<4B$wm>l%DNp!&Z(+&H&?>jsb z?GWk^eSpXBkT~IrY2PfUpMKE)pr`l|%@ljl{dJGy-_%FaXQ%xq($?Av{Rr~UWXmxa z9Nt6Nnxm}T88(vfpwIa_Wrfs>eB0(MKS+p#8|b&K-;2fCHdB_{Xse%4VOS5FYK#wz zb=aLPBe>`O`ZO-(!;Y{S*T;HHj|^5`hhoo>Y@6qssWDR{zJC@?^D8=@!=$ zam==yV__&cBp}Hva6f{#Llk%TQ*q`2m?>SHgO)G2Y{6VFiOs35;rN*tu=G+srf_-6 zbZGyV^u>HF`gA&bxn(ksd+~>?^fHsgD_5BB(-*kDtrkLQ@68kUmlfMD(54RkU+W-N zH!Y8>I@&kk0w=3^>PkswpewW2C6Ob|-o9@RCUUwDV3iZa-S=!GKW9+~JR8Xt>R)>Y zR7$(scvTJ5!>FF?52AAN+&6@s=?9?2XdsW2vcxME`aZSUh@I!|{P9Q0Zd+<8T;)ys ze*UOYH#U7~KDh(P0^CC+4_mMVu`qSht>hqZYPkI)h4Tm-XMF?GAM(=d`L3jj2 zF1M>E3^L{khqth2)U#r^;|4A2=)*xPJ2lc}tINgb2D|C#BdRx^0s+5TeYcW=?HzNq z4sZQ)fGns4&RVEE9{O@>!rM>(i;}Xe*z(b7BQWO!_)>40)R*9WMtiDv(>?_?cl^;n z+?8w=U((dQoNdR6r1k07;O&c19;eyi^T${cWhezkgE7%5@E{lQX_nJ|b2PQ6 zXlBcu-;&apLT%ag>88<{BINM?9d+-k^WqI2Os}%<_CzTvB1TP!o3t+U{z`*lWq&2g z*7tEV{><0{N;$;*{Jp|N1E|QePsq6mL$^m!sfgFTdq-ZA>@!5b?i@_d?Ie7;DgB=N zuVMd8t|DpO3(FO-&^G5Lfm<=HALdRmMM39=WAjdN3ufPs)KzabY7iBmJ*^=zrv#dv zi0(8{qo&79w0zWac{V(N)FF=l8u;uaASifKJZW3Am>0(LI?Y?HCq>fm-7@CQj}0fi z3M~;wGY|jOq#kEhaC^rC-zU*pj|I=uCguP~cXgawlJyeTY|-%TM)*lQu+cN^J@w_; z9<^H1K#6O=v2C8vyy@G-;BMx~{j zFFEb<#qhj9f566-#t?$9nT%bGjSf0Ge2O3SMrtCh_^(hdkU}qK#mMoZ40ot;#GA#= zLZM1Hpd8^^Wn08oS%0j&x&xcA(lI|ZU^6DjGs!SHtABE=UnZ)z-{N}avRHUmKYsA$ z*l@~xq>TCtuicw%pTE00xnG&A_lVP0a0v+&D)AbQNx>cbR0N_s0X+647vc}H+O>7~ z#ZO}1^kqZ}86&m&_CLfipOl}+puq9ZIOE@#el0m~gw5YDN;Emo!*)##)mpD}HP7~_ zC5)oDTh!ryAY+UsYH6$Mnj{(r%^*Z|Uczb1_ocqS8zIww8LMg2bnJt%G42wy-BjY&T8%9RK<4 znE(D`h>P6Yk*(ni*)B;#&D+YMO*g1}l2{H@Vw$LEKY5e@82p-GgQy>K(4UfJvor9M zu6~RZmH}Gu?nW*EY>ifD89B4yL&CG!L<%0KjJBd~^g1+ze3p7>WpEhOAW_4kj7*kS zx5cd~g*p``#1H$rXR#3Zf@XIbzJInINGWu2idDBA4_d88s1B{kUn{JA(8?AzlQ*#@ z$Wd`Fzq{;2K18ejVw3lwQ=bai5&%?{u0CfuOlaFbDh75->dA#s4J`@pJ+MXBjv(;% z0U7=Kxuo%miV02rrQ|Hny45yzp%$mW{Wh{ay9}3+cyZ`bvW1P8SjML%J7kl?=VWmv z7x3lnxs>C#1y{v{Zq}=_M(D*CnqFQx(bZQ7pNx9lY-r2n;lj%`CDuhC)WZ1%y_Eh`E1HCYWM}OJb#TXzo)jOW7`DWA-zHML2?Ok}g zx+vJe(6wG_XKy$DRLN3+D9??~0{w;JtHP?m#%=HOL)q;sP!(iDUfU$MyZ*_x;E3{MVyS=QG~x^<1x}TP9xJfcPWRybRy} zE39v!wr3z7z>aONiL&O8IG{!Z5g(LGHtA-q1WAp$mKAl_n@^0Cyb3oD?V*Iq6ltp6 z$zZ0xna)OB#1q9b0%%nq>AUL|wPwlAM0;L||1RMaS!z-xc9m=d?KC``gR%5r$%+aJ z+ZWZyAcn9lES1utOj#~gO~o~huQl4AwHCEkF$7{ z?XgM?N%PPN8}qX4=nh$NwV-;+z}Xa?XzI_!D6(4RP?>W^iT@n<~v8fu4cLBnBfUo_wMz<0sZXpOh%+4FK>4KuWs3T7W7A0Vr;iabX2|% zYis8_igHKkkQ`hjpVqLMRq@c5Ipt^e%tsfVCs^pcXPjehPeH zj{j={^C0>c`D3ohwp8RE0a871V4-)oxIL-|7F9sSO4S5neKW*yk4DteuB2+q`Drrd zQn9A8%Y4l&-B?=j(;K{68H`f3$VsbyPzy)r)sPAQNw1jNG(f_CqC*s2eC*TcF5Bzc zdpG^96&!W`3wGOLH_$*h2aL^Vlh#_3*V+2I&vfI#=Sp|O4SKmcn zzh~p%ZX~$o-4-^|?+A`+_V3?kkP271U+eRZ_nxOOFqZylVV;_b>DSh?e+Y9KjxM?4 z*ZPt6{V=nc+e*JFaH_A{bjJ=}V|&Dq-A}^Ef8_Q!;W*9uUMb528;iORDw}Z^7!&$b zyf%v6U-tWa-stt1-s~9cvIz$e?kmrWU3R)7lNfg8%!RkZTT>sM zKK1>HH9yoTpTY@hFPdE6aX`BDUd=#kzl^f5H^zSceD_{oBWajRz~PDpDm+VX&1k6m zACx!l7DDyo%TVhSx=|=+ zs?L9WoV!$kMzZmdAgQ7YeOqkOQta3nTpamc)~1(kVg-&VR}<_T@U)^ndvxIRcCpJT zfVIaAe>$CVSXQ z@igE zWX|+ZwuH~M-Z*2jR44+e-nGAgt=Ijwq6`a3qCYC}cNuJ%Op8qCXJ8K4$wr?LVL%_j 
z4fQAL8hu@E(8R)k1chULO4P3hDZ%mPRiAIb)@`LFvJ??fqqIdPvv4IR;MwXv22{m) zb1yOGT*LcLy@KluC-E1~&?lphS9`3hw`>(6pMsR$KsWu@0xH2D`n7@V^xK~`ccnV5 z0$x2FPvE7bu*i^Hcu&`T>1jbw=G;}9<^8ft{-veQOJTCaiecK%zH$@M))kxb%=@*C z)5VB*$_{!E2o+z-Qii5a&lVf!U|5`{;5Cuaw@R5Wz?La(b0}Frpg9!uODn}Kl9Vwr zl{!9oAW!EjfU|AkUmX8=VSF-{r(Z$eWs1l&ginx1jDgJOJ{G5y^-0@+Rui^OEY-kq#AN<~*(BeA znLEV_)Ozv+G~?0b&GarB>3-w7XXwP%4AkCznUG|Ih7~%G#{4Kx$=;#b;xfr=GB%?U z|7czcAXHNaU8$%he?K|~nvfy326Suzj3r)z77?Q~SarH|pFV4U;0td%leod>4V|sL zMG=D~{tpmTRMxDx9Ot;nS-8adu|i59j=IZsX&AKQ+XR zY6S>p-2zURWo6j@PqAS#F9&}wp=>7YXilAM6rQfY10hQiTP3;_740o45-Vl3n0OjCA2@BizSk;;Q zj4y78Q<4bjlq>890E?_=w3cl#cB{8^^a}qXQ+0-`{n9z&+E?@XrpHqko?{SWY%2lm zY?8?R{e#N&Q5ii;-5L?(F@(xv^ihM1P^uV+w@KU91#886*%*10tn5f_v8)Y4z-2Qe z%a|tog5aAqmq#1F$$v=^M)O~My3P{=v%9~@;@Kbfhd-qL>=$fm^0Z{ip zCw~Yz=Ybh3B$gD82#0SJnzsp*`S6M!Xf&G(?CjGiIG}hA>!gfiCe6c%P5d|fvC97* zkMOz_fo|@<$FL)@HH{z(FXLpNOjz{fE?L&?%WIIz>yP8Mzs;2*GS(CqE;_8b4jdRP zGVCSUd-EVzC0}9G_dMnt9e5t?SUqn_;aH=J9@=F;cVtO#-4hk>(QPFSdoFUpX@vl1 zOo+he&~rr~W|bcD(hXU*P7V%;@h%T8D78ft4LxnkkTQ4Ldo<^yePKmmP}~f&qOx|N zjdJZ&HgA_b-+N-eb?ZT&)54ndeQDt$jw3hT2o8Q0;BHPSs$@R0{x=bW3|b4ZwR(6H>}m9q~MNQ#hi9vreY1RGC`sKEsN zlLiQ7hg#W&^m^Z%rI@YUhT2f>4HFoQ

sk5-&=osRDlCwlWpBs8>&%hO>H_h7I7A z47;ZcGhRWSfUD2yi@GIYFK6J9OpuAus}6@i=&Tj49P8mrE8T`&e%IhjmRHPZ|Gm=N zw8D8B?*Ea%RjOGL%siK$2H`g_gLPhfO=MfIEiJFSWUUy4?FoA{$YzL6_AvgU(~z_I zuS`p&V;YS|RUgZ(Z64rp4j^HRae&0ALF=bo>0cGKM?E0pKA%cmQDkFITz(lu!=$OF z4^k64qh%$YuZ&%L06+BhqUe)29R$V2{CV zZHb&%IjH;uivrAdyAb=}*i1)as$y5{ZC%U+|0G6cu+gH4jGK!WJyGq8D5nylH-qm! z3&OAWhXO@upZe)*;ezjE>%F*6&RLhP45-2Gd|~2hA*p)m0NOYe++aR_CzXa->^r5K zMQSvBv+>9i=j1gB7^Z(pR8vLf{0LA$hS9KWHl5%j#k~9M&fONr@YNaX5gPaYG`EU< z31GwZ=hP<@EwpQl<%dQY=|wjJol##D_;GLtWODVk4nN*A0^W@p*(9(o6PyG#|BbiL zBZnHWNRE(~=4yx~2saII7zvT>|6v!Rg~PgFd#l!Z{=NxAwq0#`oKizBT~RDj7Duo> z=%O<@*?Q=I6yWhRkv~mb{MD|!LVl^MtJWb18vhBz7Z;TSVsP@HUCa=(A^porgEC9X z%)b@bTMO1P^)r`Q>|Ju`9hSPfZXVkCBv@hobTAh#iq5+O;t_OEx!*lmdu)(aviD>C z@lT`8M3Ce#QU!0#dx=TI5RMM?4583h{>gsM69Q}Y1L?>Yw&vTPfmIsSsb}f{4 z1Ut4h??tHZ_Dc6gRp;k0Y&*nw3O<11d9LqWGC3biJ!SG41Y=SO6~p$vE^x_sDNTdOhaH+3NIzjD2s$A`6 z-@UBt$%_2#(i|$Y3`nC{a^lOUupMZyT_)nhwY|8kzCY!G-8^OC3}4J3e+k4Oh*}1E z5>%X+AM9p?fHRny;q-47Y*!uEee&!SGe#YfOAYnKOvvyvRF}L}PiVqVBCY73uj+#w zzT1JOv+{acG2Xb3xf{O0>Ek1~R(%5%&$qdleC)usg@%mYj3Y~mS#{!|+Ndfll1lO7 zee%kDGmNj1AkGefcx1sA;P$;F`xgwq2Eo*ORb*l&s~bv`jO?RSDU;fx5MLl8(ia4L zDVyyDfxr{c$${n2>9MPObL|MDMvx=u#ju!CVuEfnjZs$BgIcYkk>{vo%Pvcf%nqKf zti8nyhMAD>TX39*e+9-YJpbo~n=>k`t z3-H0zkJLZS1r@{B5~WW))4QtTygJb-uEQb>R}P#mn+ zIIAPyZHaS%QSyO+X6kN)6;%Zb%;uZHTvu%pl(aRm%;H0G&9<%YQftG?jT*@>u;4PK z7(isKoWzTx3Yy37b?m5}(;b56JJVF~WA3R(X4q1hm>pfo;S=>cnfaGtUiS=NR}0im z2Kx!TxYXlAe>YsdZEL#@u1Qw2G>0$j@t>1>&`>f`;XUUkE-Bmqk=@pgHh9Q=uzIE= zd{#7)F@p)SOwsCMXjrOHreWozHzCR}f$~k9PDDjtW|3TzbhalpJTH^_lOA^TZt5_a ztpsH_m_GsXwZzDR`=u|?V=g>Ttt8JeOZrTnY`-UU-uh&1w7lr}#JB3w71- zCWlKm#ePSmtMv1V0D9(g0jn@Ct{mQz;+L4<$j;R|nQn*Xmp1T_=8qh@Bxnp0(nl`< z+d^Q^%2PKwYlrU%{=hiPVj%h?V>8=tf2d~1x0_)l=m~$m6`e{Qv^#~d{?NZuBOwnz zSJ3n3Xkh>){KCuBR_;gL1zl8f^%$A4}3 z96m~}4L1g$^9Pf_m7eanIeW~;cU|UG$(HgpuFAEoSj)nzKf$d0$p*v`iUqN95?i#Qretn#msAs9@?K7(C+kZw} zQY6W0_~mt{cdk}n5@!%2U$ScLhhnZf&HjR-dD(N%C4!nN00DEuIn-cLBcC1^yVA(O z8kgZkDZ+eud7hY7g75&zF~exb`V^`-w4cY;d^68|=F66kYu?WDjIvNEveiSGugFBK z&u-^x`ol?G`g}_XSyBNzQR`Wwo8Mee z<3FeDhpI$?9|+t7&BMtU|AxFD>ZJ+2zSCH9HAc@alCkb&=+UE`sZD$ZL>zNy%HR>= zEA0?<%RG~(p#jmm)JX4D8$xptG5hZ zSO|xid~p8jvRL+UXZjtOVkm>57!eZDGx}+?;G@|wRMEV8Mn)QYK*LN*`F_v)eQLM% zT}+|Xq$cBC$tO>AYoR>87LH zt{LaR-WBMkOqzT^m5S#3g)dAjAkRUWla1YzVlxZ92?Z# z>(?_0?SjQwnvra){p(4Q$kEV<1AhVXym!orEoBPs6#LsoDP~@OC1TTn9=j4~%`2OO z&fNqPooDbcNm;TcB2WI=%vTNULH{=m?D|~K`wa&?ceP&)ud26?TIg)PVV`AXdL|bn zoQj`~M(N1Ek*=gDcX@xwG(dI2FJ-=I6dD5I6~^@jR*TryUHG8nA7bt`QBI~tVmxt-Tu<}N*@?)K29yV#x7B<-O>LCAjEjcEvpVz1p$v}I&CT#>W{ z$^z5-E2?EclHulskmS#^vsdviUb#`JR)M|Cit8mvXXsRi4mpJRV8X%$XhNp$HMk;8 z-TJ)&k|2k>e+F=p2~JU|j0S2SGBm6QzVD=A7}iT!mVA9jYt1m5HEu=x{A`ygaGqQ( zPs~R|uQzQJ>IjtMn^d}HBsN2SAkSCf3NsFw@&fhAbCj&?WuK?gJv#pQ3sPGnAtih< zsYL~ov=%7}sUoh0*truv}l^5F|?fn@ZayQaWjeKmU@ zQ)*kEBX#U><4zXtFxg}}+Hc6G##rWIzJ5WWi4MxAAp`^4b}L+~{k&pVI%H3$u=X7IkkIop3a2vNZpv z^I%`Z*r+FZe%BpaFxM>l<*Y1mX5U4-OoyGns)UK%-h65mlHGPyboTeP*KO#Hn*R94 z^ImzQ-9T7MMpns?;jn5l)@{d@o)jYW;6VLd|K*6(JvnSge^j( zmzth+eT->tOI`ZYLQ8}0OU4XB-rSd}@3KlX*K2#6a-6ny%z1aGz)2CJo5y4isoSpy zv13FXtJkfayj!{y!6!v7VpHA&`Wsbeab3H0Jjx_Dpa!;SUtM?>o8CM0Rg+NWYnv-I zQ(8P3SK~aE64swAl(BYLTi(+usy*0iTv){A+F9?uS6iW-K?`(VA6*UE0y*>GV~B#M z57xS@S$LpLsK=e5qs6@3{ow+x#FE^jo7KI$MRh*zs( zXM~EB+wTIX$jo)a$DdRLf_X$D5+n5XxkrV4J5ko3KI3SPcb9a386;#5)GZ{^W6G=! 
zpZ==DJkC+VNEM!=2^cgHrR6VvVrGuk&-~}af&*9*=kKY$uQ{&~~ zgEz8h~}=%D`K+CjESnfh2^@(Rky`+ zw;I7B{qx70=*vdIqKqA(MRm)ECM=x(pdq`)U-!LK33JiPwbog588{!4yklVH%DJY( zWSXooo|CA=&g-IkzhJwJX@5EBX+r9D6SYoEK5^vG&)?ohYvo07%-kZVlH4YXSo^3L zT08Bf=gQS#jpq%H#wZKJ%28!pX6|43jYguEBaF+I8g|R7fOxL*LD_$YCb-nzLZ`v0 zNA|Z#bztAuL;Qy(^{qqEU`ZE}J8!*|1Q~H)=*u;%Gyu*sA0%fwi+$kS3!*F`zbyqi zu@#Qq#zv{eTr>C^@*b0U!2VCH0~CaNCiMj}O?cET;*{jIQ3g3z4DlN~RAl1=Mx;D~>RFXf* zjrkax`0t;|0-v8k1VGBS{xde`oQEiE6Z}4STZXrQ5Bl=DbGaoVCF5b++Ald}+;H_j zH7R--h885ZZ{O8_?Vcz9>=v%skzK*If>DUL{gYIJk(*xJ^e0zoNhFy z3DyQ}OoakOukIt;;p<4yz9G0PI81%z+lG?VRS_QAThrYWDi({O^=WIr!(RHc*BPGN zzPB=-22x^dm1zqV9&hdY)%W{zYOonvNb^n}k01VPuWSL{+r5SJ6UbASD9MN5u$4uA z&f`e~fgE)0^xh=k0YNO*8Hijwn%Q(`p(THvx8*jo$>bz=`{|UpV#3G-1G3R4^pR zVc0^ia8@x;p2yD~$xf4rcN36V0&U@R-cV&E2i`H#{RIYougBrr3icgP(3Y* zy6o1|N5Xjf+hYS!-veEWE1RJE9Dc*Vf~-TsYHJ*dm^=0h=|2X-#{b+AcX(S|SgZ+I zN3=}+!6k|xKrH{CzX5<7DHMR)7!WI@J|(B0Ws{pFv_KmV$axZuNKuy>EZ$S)-X3)y zoTar10NcWh@(H9l${Ca&*hJS7j(`=B^yTgL|GqX);1&aekTk;miMwK?uT?KpRj(zC zPK#s}`*3(~HW?C-U^&Tx{s&z;{o(&lKtSLF_rKt#|Glulo^Y`Cjl*m)oW@5L?U$DA zkB_HC_rL{#8I(0-x3Te`2YRJ}(90?_1isZL2~rL^hW@j(_6_^pY#=ZOgWKP$nqzy9cnJ~`?VzY1LivyH#{g-Dlkal-Emg&}eTJgBROV~c?2E(S zI%ztc8mU_xUXFVr2U<-8cL&bIXYX1l!^P684Rv2)}4!jx0YSJKRJF~WBA`UU)rqQhNYFBiT5c@nS$=bh>o|9`GapBy@WVD0q+Q;uu$3JTiw2!rq15R zRV9h1Sm@We^~*ZGF{}$o=L7cus73ZOMZMDC*rSxVs)2&G6kW$s&J=~Mr;y6dEdOCr zdnu5oF6nSyA#y{D!p9Ud8M2)lYn5ieT4Cm2KfV~yFN}?~T9)+l_S&B}8wq&2F=uLX zRe*)m)F)-s(v-W9Oz6TM#K*CJ`pfcdRmj%&@(R7ZafMJ) z9XOhqJzHkIagX4~L|J?MH4^eJ_w7^h>ZN4&sj2wvS?xEgAY|ntir+PY)=Rf`XM=u& zz{}j|J$I&B9qKs#XB7wWrTcQQFL?x8K#3xEsL4F;f|)I+K6QAx`PX{ktlN8{j27g# z#OPozfeySXGR?ZfDI0ECDl0nmm14u3qx=I_eTh`E>-o35>-$OFiipRcDOAPyOxare zou$kGDZ3zL0})@RQhpX(ZGVCnzw4*0G-K~y+6E}-z-Ma5RgSo{gx3kHLOH3W>;k(} zKz$x*raQ)%`QgQzoVPzfw-5D1XAENbUZzRt2v;0u=1fUk_=F{$nSI)ZapSJ;g+l1L zxz9?mMGN7GGT}O>6cjy~3aQ;$)|0T0|IpTK>Ur8X)0nGf&^bg=9`$65&3INz*Vy|w zhS8Iz^m?nfEm5NT!Jp<_CHw5d&!I_6N{Jalb+|~Rf+tL489k?b$FuE<4*9x{tmKV` zym0HX(yCm`R=Pu;HJ#)dcq=Evm667pVT__)fwc+LXCghJR&!;8Bn!1y$-GY)2Ag&X zOrcKjrPuor&iuGg?A8g>gM{ytf!ZKtEXz@vWVCCIrkwzl1Jgv3O7eU4o>nVOMQJ=M z=!M}q*gXTivjkuvr)K;p<3ulMKds2qun^z@FJhZi(8^HtjQ2OTaL^8GmY%~sEEydk-^&+^G+f|CbYrNjORl+PU(NlPPs zdJ|XLl6q;qvpX#&{esM3SS^gIs2ZX4YV|YYIu4wx>PYx@Vf}XbuaNmI>_RG^TFafh zo)tZLO`4^Rsf^2^>l-A7jhX81Z&b1OT5cUK9iXU;6X*}4|ommuFI zA0LIb>t9D&?vdO=3WX(eEi>Jd;lGFF1X-}GUtJmQL3 zc1+7A8`~?m{)}zL8(JaJ?d{b~6xySNmS6Ovv`$t55x2TmvsqlMoV%eBY0}l_Ip2xa zk7gWR^Wy!3(Y{~fWxliiryUQv0p23sbi%Glh}T_Ps4YUua8;?fmT@GxRgr04i1>C^ z6D5M^31t(eed((cb`?LOFIVWM+j!^hbFR&U3{pop1tK_51MlHB37O)C{$DDQTR@Hc z&W2y+Z27X;1ZuE{S!&=MEz`?=$>|tYvyRhlFM;w~cb29Qy8_E5u`EySEae7FcTzpx zInQ@VS1MGc?jRNiHfdM#*XxPvkBRMqreg&IU+3;?bC^K8^v=6PqApcOFT+b)lMXId zmloVwFJDZdP=ekwNbBm*b6Ag>gn0I@bWR4{U>3pVsl3PPNcs)D zqrNuDv=#&u0oH^m|1{%#l_U?ac9jzBwkfp``5PH2^x&-ynWinj; zn?-BSrA*5v2b=zcP(=E~0Z)SgJP`KgXPQqkPEUi;UuPUI{{pk^3Yx!M zBi^>f{pnrl2C|yI8@qMna=s)gzw)V7zDIWHb+3%qxREkmq5XRG$|rNV>Zzw3X{lG+ zG6Xr5zjSZ6@jfyQFB_MV)dxeYJ6D1(97T>L)0Q@o3)1w;IrMtr#vHo-b_SG@sRZqH zb}Dw^NJj%k=k7a&fn|UlgqXsMSp5Bg4iNAw!Lhu(l6Ld^knfh=Qz;O-+zF%X91eWN zgfX3;y_6!kwTG{%Z}(uDMnX|{c`2w-=j6MevkSLQu9^n`&d4OW2w$63mA&s^z#Qu= zcqR~4L^9kz^O{@LdFCHL#!cTi^z0SuyYCmj3L0j|y^sa{8OTy@N$k}^?;-^Q=j#Y> zl&3CqHP!7ihsc9>$peY3o1LS{?q&uR#*Ce zUC;%4Rj+UbIZ&0;q{+qRxlB43f(*<^PO^Hg&kPUoP7zy z#&_%#dn;GWD^LUPQvv*{lBRxUz4QEYDxstF;CIPC4L-Gkl%()rVOBCL)twc`L$y+H zUOe%ey-IT6o=zu{WDFsLrWxmH81kSAzAQ-n%!<4!BlWtP6>TgA@Q$QBnUc~Li#S># z*fH)iStl)6F_J-HJ=gM!VCpUTYUl|3?xbXPc5|ccoi7&$zH(_gbpF z{GH+@r@=>#xiuNNzX5?~^y&V!>!pvqK}qkGz5+lS5Wxor$P`Gnrn|&@J6@ZHQWUC) 
zAJb~y8?Hf!=AI4z5S#q-{2b`WEBq3)((||qttY%N#YpL2Xsp!Bju=|1oS|X%%&Stu zC%eCutLz}HZOGr63j6VvxfGV|;mlzY1TJ)Gc~MIsDOgI*hE(I3ek)(39gEYDwFyRf zIivZFU7P6N!l0vF8CGEDIIZ`xYHi>9G<*spyKZ;60rM1G@G{-$(KKwKr-}+WX!LpC z0~{_q7C@to{7ARm+#<|M}~FQ8osih7GZ#8$#!cWZw&>$ z^oH^segY6d*ap(4o+&)G;S>l-Ie%TtCSt>S@CLzc2H{1i)Nsk}K1fd+g6t@F}Ccle~y($1ckacOLqPX|I<|VM*pME%cx@Aw|^iZHu8r%Tgc#6|F zUx8D}q6nfv@{#}u9=l=&aoDR*C?2iBFP|e@&jl?S{ZZAw$l?)}p{XS&_m_6%UH@Ch z#orH30`}hOG$}KS+_xPT3ZXUk%lC4f1crv1k$fRvK^&=kM5> zR|`mENf~qb%V=4E#!>X8cWBt39esm|6#M)iTCQisIXk+6r>v&m6P61+Eu8aDU)mOHr+upWOl8W!ouMBV#EFaP|?7q_`3 zy)p_&?!z(sP*)25qZxR0q=rnDgk|6gN_7q=&F|;4Cn4xI|JaP z(gL$aTb5BdeH66WB=`Ped|mX6LuD(Q=tQ6319HxAl0s#+z~c2$nOH9VEJEbZ84{N5 zBPw5D(I)O~D-C>!C3P^}X&hMo<*;(^;~pySGb=7l81 zqp4ZY0Lq1uTZ66mD|ue#Wb`~xEIwfz6^3>^+3=H*9JNkHErLd<^-iNO%muCP{@QtR z!mW73qCah=A_yjVw?@P~Ge~sXvXy_HrOVTBxSXNrJ)ScVm<`;MWHp&%y!6n5Pj}Yg zqDD1JF-fRNmYLe|g!dhJ0E@r?<8tLdl~F$Ec_0r;g~}gz(m2le1&mZWxyJN|q@aaW zFDmj136JG3#8m!A6zpx9{EOhPa}Q7lEEx>sm5Ptw zpNTz!a;?v9GCOT0Lq-U@kde4Uwq+MmtXuD%xS@iJluvfp)-{&5;8{xE5f%H(I>IlI zw#LK1`mA(%z+SW8l3udt>l2PoiIh!EfHpTAU1(K}tTAbDdE?O4chqQrYP4reXzWm= z5#bHUjj4cjaf4-;tRtnz$(WZsEpqeFpcI0e4XSv_h3?9|3Y~th1vLZ%DX^|t|Eo2_|{l$Fu)Z%?NFo>%I%Qs0! z`Vo>PN|}vK_0nojy+KVGSTrZ*pHOQPh-QOSqM);UJ++Wm!vU<#$Hip4AnU6_ zXDEeAaB|%~zQ)Jr?YZ>l(&ac$v{SW!Vd{YopaU?dVBbP7==HaRW3WJi^;fQt#QjFy zssKR13xixa4bWkX*(|)LiHI+D!~QbtZD%vvY_tZadUv-{EnV<~^(YR_;Sw!bq`cDCmz2e9OEyjp2aaMh+v zsU+lk(`Y2up^v{qt8%x$Ak~BVa<(a=FbT#1`(fY+NPi)Vmk@n)+R}E{(nFmEb2@8e z#U!+KENoEIvyFr*XTPwb$s={Q+I}!Nux-EeQ7V)3$}aXqfA(;gOL6{RAmnibV}hFv zJj^O2@956&L~idAOnG;Pa&NuG*J5pLpehlcIRsvx^wduHpO$??1lJ?^fijzczdP4= zMz}T!omHN`{#+f2D#N5wWgPIsWF6_Wf})$BThM+grS^@_>|?+m~|lfMP-i$wi<-I50bgAIG| zyC4R_4bN095S#>KRpaYV94I@U_-E7|4u17EiHypMuzQ`&=Hml$3JjZy4w;lhCbWjq zq?pakcy(GoIDhVYuO6J8k~oOs%=D^UWWV}a;IxCR(ksB^7x{LB|3;2y2a9!GG}fmab+}A9676 z6fhH!^{ln(-1IW%Qv3UH(E(HlCeb^J660#f{@mgL6G^OIbY(^FKi>hfnp^6O=nycK z-_uoYIx>_x2t!cFBtb{IGVGl63h;`-olB%wQTpUG?k3l%Y`p1FQ+FM2a1`8((OjiS z^akM88Uc>VNv{h_J&y%*^s|zWwJ&QEYh+9WQhAaoGhYXdHnwcrRScXMc;DZ%(cg{$ zjihD(@fWD=MN+I*gR`7zg8VaCuFpMj6#|oOvC+BmESGadGj2%5`5t?IBK4;R%gw1^ zfUHCJ6HIKhD`gQD#OJZQvurEV4RTHDKYp2C8Zv|%jNonGeeN)PGJ5^8(QsgngqM-9 zk#13YgqyiPj}f?&c+}}U&yRvvH*VJz|C1fgnc>5GHaJP2kMWBapQx zAm^iBDR4f2n8MmcL=I8}E}X`jUyhs2UjMxf>%%cNWoG&5W_x-i@Un$@$pJ z8U6um934Z2K#gB|#ltl(O-Xh2jP9FUCjD)GD|*xDnp!bx&1G%vBt3)bi&KC!u?p$!?b()KGFD=%2U+ALYuCUrL66xV! z865llLT$HxwXa6e>-f}}iCZE_szKZU~PzyD4*)h(wY3}kfturWyR zp-g5LYeVX6?rimD<7_dCW#TYt^wKBgAly1u9rIQ2s-%5DP0xkcMSwDmk?c|ar?Ehd zgy??HkZL^w(Fu6A7j|Lw@`2+nS^Y}td3H?PTqJg}-d^Ly$)`5%=X8|L!erc*bw7<< z{7Rl=AH<$eYC3X}qP9%^NI z0GwkR_Ndtk8<*Pd0n46`1fg#Jddm;WVVfnsf8hjli|>&i5Xk9sPnel19Ng|pD4>zy zI+g&8&GoZMlO`7m`=e918-_L+rd%>eyt+}@URL)x`}m=;cXDv zr3M+?@7~L48X2zgL#?kokAjc9mL}@I=tY{E-r|;`;`c@9TeL@u7;YW|MtP6Khg%`7 zd+r%rQ)W;&ex)L91=adY?2JSiqMhhXij<3B!9*Wu-2f$tPLByYL(66+19ERlS7-BM zRB$x_MugBA36Sd@GWpgZaeLh8KG(`_8vj8AH_ko0sNDS`?qZ?COL84&Mo- zY-fJEmBTi7UNed}s254Rbp2)ah11=WAX`vP!Nykk4Ajv`_2x{0Jdz^gRp!UryYB7* z@t^8UohM6G6Z0_n)XJuPLiy?~s*|21mTLmQ5kUTv6>oQ}s`B$jrsQE*Pwb?2hT`az zRCBX81xlakA7shqTVLOIyncAjXbw9$v1P01y!y|YW6-Kht7pU%@;X_ zt4{LMj zVelqYNJ|0Z+2r<1!MRtBwcl$wNMb#4j@(VF{JD-`jadinJWEVK&H#+63NORw!8pFB z|Grl#52|Yr?PlJ((|ho8pqDWP>tTyI7QR46HUO4dIj)?{jK<&50udtlPaDzY zqA;DEx0Q(k(;hnBMdi}_?~?1hKO`QI3NfGTS7+;r!mLL6N0Oq4tHArgtuXOxiQyv! 
z=wG7p*uQiLRp0G@{C8-t%8E`D6hVw&SG*mm(s!_>&Eamp)MNi=UT|t>=;Snc>eV!e zUf-K7T7)Q`MpkE4{@wnX?ZYI@W6fw7K*NL{xu-`ZC0geO6uJz4GC<>3$4erOn7VU# zcrkSVl}u;7j+PtjWqRU6Vldv=0_?#QqD~7Q7Fd28f3JKFV|aaOMnvzKQ0LEs%U|I3 z@dY)tccnfm724AkLsF3bP^a`wIzCdoAE`9apOGOE^I8fP`_#>-b5W~rjNI3si>K;H z!!F*Zw%J;tINfXdo^jtFsI6$Z(~ZdI<<5PY(@FR08Gw&2jk8=v>Jj%=oEE<&MWZc$ zbu&_QiPFlhLVobF;}AKJ(aEo$3T615^nUu7lZMn}^n@}fplS>$|B+Se1kR&LX)k^u zz^y+3w`t`7-YHFRe#k|huJk+ckxsOPsoFkpXC2K7II-H>q3(n=a89%2Gbn`)-Y@Z4 z?!SSZ-mww|Zu;t~1lbc(BjavgE7y@l5ibO;;GtLIo%*IGuSn8}5~#IpNO)i_g92LJ zlF>lGb~*34{OR~vM=-py#8=SZzCvXTz-4B10pZie&L;;4-Dt+eMnH$;qy-E_ho=M6 zi$iH@tuP2yLb$WM1qEB$q%z!>{*b7J6h_v8)J!=Dm<6QwAXooztN(HF7fuCFP~}lR zZlAkNHon&JoR_=n$Fx6QpBvVbm}&4PLMd9P-Mmxg(loIyp-%UUn6&SgysI$>^$Zme zCGv8VB~+9-?(PRORh&OBg+{{BFsIa}a{diNSeh{c#Z!l-dQIP=j^6zTY&*52nw|7n zeo6J(V<(4CN_$F-d-hJvnxDvUXv(yb5H0d^L13LOS6?QJpm=Bh|{ zgXa^7MAFI2zwJP_Ca7if)mhO(E4JEYg|Du3gj!ooVqm$0dot~jmtiTuH zaRZ}{xPeWQ$%mLJ@z`k38dJaF-|NeE`ltY|sq6Z8`OnA9i0o6f*+YWN*84OKY#HP@ zCgWvY!9Q?ALg-wJVW`W$}mdr_p>VG3%`+nJ-&69r|>7tvQ(&=}b z<#8@)^#Hwjz*|)$@sWYZ<&PiQm;<+WfsVj((}140+px$-EoaEi4*;FW?=AwvW!qW? z)$OaEO$$rrrAXGrjX{w|;8ZGM819{#z+9z45++QPZ1WsV#U=u|gg{VjWdc5b z%#LRe^;_jLF;qCVscm|i`&NHiClq zWn!VN4K0bk$vS?JG@)Sp0d*eh(Xg=R-({^dS?5IXuK$NQ2J#8+8r?vi!5aofR(gO_ z;v_8$l*vq1%XXKaNA`e5iX(}N!;&H=*1&|eVbfg&&cDCDcG_R=0^*N)8fI7sA3JX4 z&#_FC+Chg8a5TzdRiM9PSs!?SA^ilnoZ804F^hFCpttk4yy{M zru=sBa+w^77KDm4obcGD>tw!mRrV39f4WkjHQ(3bMSS(8raS_w2YwMsE zSm;(vT7#|ZTSpEL`^O%9O{LK3&EMK=)FfNpOsm7Oh_`<>p8>0T?wRM9GUFkV@2)%D z5ar!}Xx-F_7wX?vzyq;E>^#MD%Fs^zN3$(IYy)dSG$ZJ=p|d7tJ7nkayD*VKw#<5W zA36hUhxRw%C~MjO&6Wj=Z2pyhg&lUmpJRhSdv=c*2K)HTk;3~Lx*tgn98=I<6|%Tm zWydbiQAn#Y>--$0A(Rl;i`P!tdJ{_T<*_*8EdeSwf!sGaDa5~_9q12V=1l`$XNy4 zuvVlnGBvSE#7>3|88%W4Hu|9>Dj`vg-)bKqUUNoBx-p&xKS1FH&2H-tF<18OjYu1rQ5vU+^sC*q5(w;pK#C*EGF9>;qa;-D5oO&6%?13WIw2>J%X*NgeC0d(o&alChI43*>b=W-#Xr&i9iHIJ`SINlVA zY%}|Er@zc3rz4c?RZrNww8e zaF^dcI7$)cybu<{_iBInSKgQlWs;RAO z9}S2K78Fr>Q6hq%bm>izpnxEv^d?A?CWPLC!m&_AKtfj#kbdZ$C{-a4dM^UfA)$v5 zlKfUY=l$;Y{_ePA+&u<=Fi6;Yt-aQq&wS=H=X#RhCyDD$k!fpOLiE75;j721ghlEr z=sDLKr=O|Qb66|wbuPBoSu0p0lt}Av8`x9naRfn*XelRl!3y@U0b5lPJ*II*R8mar zeaJpI_k(-wJ5Di{C0$%)YHKfr8O@K_?U=b=eS~ElU=3ahr-MASvn)(4Ihr$Eq;nHdq-=svG}YKRB0 z=iD1pJ~8~rh-mqJnh?epgeB!TE0M$PJz-`sNgd0<1oD0Uildl>O^G+ik>U&$mu#DI z|CiR2_}*f-98)(_%gD!OL8QvIAK(1>F4x=7PkHgKnJdemFTD9i{&|{GT*e`Fg&wIf~>Ta2#d8*W92g#ICZbNtWnmMLMKvN8N%C z3JH!1nn|@PCV;ljBmY^K-ZPYP)_%*Qg)WB0i#!$#QB|7guA~yes0BO zHTW=Tsbg|qaoebl)T6@2{=IsxOsoH=NwxQbElG<29J6lJB<~d)FQji7-wkv4z}Yq& z-gVl5&Wp*U-Gx$c5;pv#p_&;gRgh8cHBnrLVeq@^_%Q>8-xAOVi#dEoz@JwyT&8)_ zcf)tem-Mi{cr@KkS>5N6H0491^q(gJ!qE}+#}E&i6Jfq1^+ zMUgG_4LSM=`Rdl5nvrG<;_}(kKR!7WPT}XY&hpy(H#}kxRoVNgS+iA(x%x@cWs0oY z(!u8d7N)tQ{M{GTt{XSiL)d9dCZCU7f?m#TIaCCho!TgguBdrVC|(7R&iJx5iI{2U`+B2!A}Xark*6f3+CHBz-W;(sB19z2 zYO;}{VkxZqc+`{l6VaGEZy6T;X>5)>p6MK=z*mYyufPn+FvNL=mnrO<$I z!ikZY(qe3C=*Gnv3|k({mCm1$5;m*WGcx^+4tlE_=JIyGZR7c(meggXhrD16Lya*i zPmUD0JC?v=zF$ycnc&ZoVd$xoXJJ2Sh+|`SnxpuImON4 zAoTzN^9=d90bc!@as*?i{4#~9Nl(TI_7mm(77;a0=cj8IjPzqy~~Am>)y zVh(4-FLC7*|LwZ`3{K3u1sha5MO1B_(hxmY`8=OVrrho2%8=LYC{Zo1wFfzq{4Nn- z{Ukg#fcJ~HM-WZXUPnFIT%2~ta(WNLEGfDuqGfIljVhf%94Xj*d<*XtNau}}22%+PJ62VRdm2ye6npJhLN0nP8e zu7IV1d}YaU;Cuh>p#+S!?H-y`(qs_%l?E2k_e?y$pzm!_(nbDT9Vw^9V19d+)t$>) zkVQqBYrUmBM?K65=k0^*=GSBdkXqzwmFc*q&6r}b#EX!J{)q^zr1<6cEI)R<&c@_( z3DKg1L>sPtbB>cWqzoR{y0CE3RMMzI?gYpa5@RTAa3Z@m2=myhZ ze8kGL*2*Vgy+J{47ZLc~)C>blAD5OgRGhJLGnS2}2E0R^$U&ut@lr)WI_S1)+)2@) zpu+u1>p?5@)aY`hb*(HIT++lGLBRv4|Ed! 
z7F5OSJQ{+;rt7T3o^7nPnjxH;Jk++h3#xJRq_%CpnGunUYRNNPawbQSq=YuEAOeRx zZonGblB|c+z!9kE)bP!vxS3X^DVby!1 zdqfUwNuMm$PKN{eFw$>m!KR#EAU<|O8`v4?3*NTwX_t2R+U@IXCKmNRn|ksE9gZ!n zitt_;i*~_BovP*=4DYb!c3)Q)}eD|3TtltM0UO*t~jCo4Y%t6q!p; ztytEI8TmMGW>lA(?9G&NLunS@1##d?<-bLe*mi6TP#vBQzPzmgF{DS0D#UXr^FpXa z0A=4it8>_ZSNGfo*?=}g0)U9#_?ns(^I&&(cf+X(pm1;~>07XQQ`E4iY5Po}w`VE< z1{;7av&QW}vo@9sn-Z|xVqgK=n$WX0fg((ol@F&*hU0m=md;DS%*24U zg~UigQ^$O%+uY92*|jFtU?XQ!FTpbqTS;)F-&Z8%!?0i{_6-I7%*!T_3H?bmQ7c2Q5J5>bkEjQ2Fx^em4sB)?M9dzDWrPbd< zYy^$J{A#*kgX!L45c_9Ky1z@iry^LQ%uaXQ^3ZFlEDLO)Gz6}<6mf>%&Z6nlyNI5cMX|o~{USbrq{Ur=FhYR_5IzqWI#it7-g64nZPyRf)d|asf<1@5B`iY~G+?sr z@>xgEH`ojE znin7I(r75uLC#gKt*N;s{Kk2Y7FFo~#X_yQP;j;QjpZ>ngS>qcJSs zW3HvcO5Cp$XqSwC5iIK}^j@@H9_h`r$YNe3fj?K@Ie?KrAn|5Vy>>&#D7J~YYdCtB z3n!-@V$5J{K5^`!Gys+D30)n+PGrTR9_$9JNELWl;J|`j{)xf1%mp{@((Wh;-)}f3(WFEF>a`Kijs)O zw^Xe&GEnjcvRLnLh}aZhXd?^i0-aglPqBe7bjOLU6n?<;g-HKgu;EN-I0O2*R3Qz; zJAcUgh*rmb$a-+%VfT%U&@TT4YlUv@6E1%7bI*o8`1bP(7r%PWgrwskIzR&}KWu|ITv`F4P&!hsHqhDZ%v7B> z>8mMvgNfBmeB@-27mim68NAtV$qxxBi>R$v@%sLu6>a1*VK_iF6Iiad;;RxLjp5ds zZxcq$o*Z$jB+0cMCB`Z4XRSU6d&2Zf-SL-+`%IA&PQswcy7u7PhpUy6T!Tz*&=5iIMK{kav*ew{a4s|*+eaSmq|iu|GWqdVfY-f*#F@Gv!#YEiJdBIG(s%n5CujEPi&N4X6;3Iv1Y|mY)AN}Bu1bXfN9`8H9UvI zKa`i61=>O31B_=cm5jWY5Sq9!!SdOjZgi{R=+H0YEU!xS7o8V}nZ>(0sC};GySTGw zvLyrRE>r*=R>-rj_(x^VRZK z7a|yQZb8MN2OJDPHAQG^fGNDW5x6KzI+fRs&Dq{Gi#2s|a z696AO+w9!axNbYG6Tz<-!Q#ZVsK_XS1knOfUh{DpxD9tpI?ViWWT~6=gmsWO$1Pf{ z-UK)np6cna4!sicK~D|k_fx*l#+sSrezpmGapOr-z1qqVMsWSU(}5)o_Abh*3M zf1~njqA`IWtQ#+L_*suC&>XnjTvF2%{vpqAx`RH4?^nK|NX_(i-*?Z2A;Vi{O_aJP zUML{RJzHYu}1@U#2@wc(^jq(SNsI0gQGyLt$-rB2jMhM)R=YxyZ>xmE0Q9X~c%S zbT_cO1YkR8Z8dKUT1 zpZLqlVqplpxxV1}>;7AsW_ZF7ZF3ESOUi#YwLNHkC8H zolzxEMSJLF-B3H{!cX@v2XU}k0_0?|8yR)_kzIQliqh>lzc=zl6yaz~yDWuk!ry)_ z08W;M2ngJXoKLv65MwE}x9Psw5>Liz@aF6!VzlgoKr~~EFUVlbv22n?D)zLstd|>ihk=0&!iRrbd>S0m;O@mcUDTAb*H0{o~NSn{9_Cl7)i23AqEcOMy3N zJUM8tp>Gm}m)4)Q{oxx35vV}9pep>pihhP!s#R4|G(?gMA)2j*AU)lZU$VG?4k=46 z-UP=;cV(~B(p-ag+fZgP5&73?wHT@yY9%%V8#8C}1xIQiLohP(r=YGjT zacH8*_1`@YIzk;ceQC#UG^OO;)Ltn}_jCN71k>uL9@Rv>#_eXu%o8TAGe9KzkgX;ckQ_=^7aHKnAx+u!Zl-?NSItAKc?#i}_P!A`!&pS#G%dD!wZvz(tOk`+olb zzD2I9`ECZR%~{a2Go1u5@wY+2E2ljRGss6TRyEaD+Uek3GvuT9FjdGWXdtpS|J zC0VW_PY3()RZs>660d@q*NaacV@X+4Z3ML36S$29xfT<7y`Ft17(q#5g-!WbEXc`N zAi9wwLF+0N9aU-``Hl+q1?zCiK3#zfyK!4zz#bMu9qH_IF zvpB!|nu|@LLRIO${K8L_fT!95Xi>wD1yk+34ZDU3dlb)Rm!pXcZXJY0?X`RZH%hBC zqoqQl%&heAgo@=+ny^sD$TFiR5wR^G$GG&9yCQ>$|>9nUxeO~u}GgVt@^m!yto_zP9_9d4j1XXq2OQx@TRp zmyt@X{#o2uH_Gi-wO2Xpnt~`4zKWBeg&V?@p_cD}kkTZhF=Z)wx6pjiO?c@x<$#ye zqPL{R6i%+ep$vj~I4hk~6^2d&Os{}nFK;hY5VDLk_OVUrpz}-+EAXy8|J`&$8cXSm z=_n+2Fde2z9^Qg-9U0r253vj<8E)mOs66(e44yzTood=PRM7!`aWLHwjGgIl=mc_4 z8RcuSLC(XozaD?_cb`4mXNL{@152TjF*KPVx)+_%xzkRE6xd>i(NhQo=$%vgm!2+& zGBRd^{gEAGDJy!D;8xe~csh)~*R|tBrrPZVA|dYkoPyteZhe2=PLPaaQE^tsw5V6& zN6Ezu(2F2sY<3E-d~f6TAuZ(B?}nfkV%#NC*AYjr;!@IXGerY=*(gW=A zPXd<^q@C{-zpEG1Jl_)VDxBK6>C*{4ri%)eCpMatGu~$X*_|OD*%g(!EYf!zXZhpJ z!=jeVq)HBY74k-f5uv9E?jUx%0C|iV`GjwskflP_*&^23BcyaY?E_TOTvgp^NCZ%JF&a}o z?d&@PsD?EJ+bzufhDA!#+Dw%3hzq6VY@1^05a0yn)k92fE*j>g*(_iv+Y7st$F4)l zUf<_DJ4W1iye!eIJ5vb!VAup3y!6Q3meXk|g31U-CWS7Rb_N7zZ%-ElkvlO@KAOv| zHX(_N)YXlyqio_)%BxNOM1ha>3&NIMw(=tcOq2V`$6|Ejf>{+@Hu`XAXXH@{4hp zGn4ESg{9P(Tb0<;qDB&SwMH5n9#?2v2Z8glE9xm zQ*X)4y#Mzb*S^s?eENUiJxbOBeDc%IM!;6Kl-LWYq9*cqPR76tLKcuvDu&~a`3&$4 z4pzT@^zH6SVlGw7x`%E3b04#51&R1za@P0zO7=@wCC9b>li`=kjj8M2gNZ z+3?(;i|Ht((<#AgU5KYZV4R=tDAFa#l?dX7O=ZIE9pdk2-M@)6@}UssPcO0Rre|A^IGI@BWROBp|+y?_Q@B6R*JhX`bha3E$0`TnmxUy`9 zH*X%{=ZZb;7Nm{E=iDg;B~OaCIz$;vKWb`EO+|!%bE`tS;7%d| 
zQ}|7GcrsA!m7N@zZ4=SO>!EobN8i>jeg~qRr$dJXM{?%RlgOtSXuv?UOQO!-+N8e8 zZZs!hSF&Y6uOr1x67w@j!-6rU_2_HiYG*-7VOY@V2stRe@nHLYcT3Vz8v3QP=cbY0 z-X&&b(W+9=;LCHpIIY?v6K|pr1Mt1T$CwTxW0T$yH_?g;bJrj9l(0_pu5YUnK|v;1 z01)J@OlRkV$wb`;8Ap?Yc6t%L&Y5~hzg=64Dibs-`>b6gpYuhl_J$8n8z7RjZL1?9 zRe7s8z`U|vF+;Pnga1}$bBe~UAu+9d7LQRy+Dan|o$oC#<4?^V3`OJi2RxA_SMoi03 z5)E7~is-=TVoj@YV8>C@63agYXH5#rFWkVE>`dpkbg><;TU_MS$9&k)jZEcYbi++G z+*GOvFclvc$6?rx8*^GQ#zY4eUvL!>v2apU@Ac(~O&F0g6ry%W@K1q%}bk;Fw&a4nwLHu36(<-UZ0C-TyFe@IUY zq7@G%XiMp_cADwAGPEt{*OIYixu&Sb`UCb2_8}LJt!ZoC$>1fK@b{(T0~lqMZ8C1m zcSPgd<0{cRLuS#^w9VERpDzU6eT&qZgMCd}*e>sGD4E!Y zG8afahGng4OLIsn$OV5YCmm^ot|YlNB{osqrVHpkhGy}kME92t0nYfX9*t4}suG(9nREABgA>(MqU%`&nzwHQ1b-}#8YZ_qZ;AI{<5hn7w< zWqLFID0{mx4g$=nj%|l7a*G=)LgUBCSA7kaq;dKyI|0RMaplv}&j5p2#+JbTa^c?f z5oVg_>l5!_R{M?Q#m%)qa`@}6$8KvH6St%;$jXd23En;(PnDsADz%v zmMGWS;7vMm6y`Q{>Na#0TznW=dl{ZLsKXvK*68rmHU(EqGy;PR&+xmt=)0ir#LFqW zt=F*u`)&n@_-A^D`~9)Cx8Uq!^r32vdg+xO>=V9GKi|H1q{@&+RltHvwS$e*TE}f= z^m#!Es_KG;vO)YEb3{j-Cfms5+wWNQmMf+T8ul#wk4jM9uWVnp{LC|m>s&DD&{ryx z)I`CS+4)W9@ocf|*MxW#8j2!xoY!Kd*tf=+|LTrAtNtL zT0JVk+qqu%a$6XeC@4i4Kpbqu&g7bGN7r_39V^!gLmE;d1_o+bF=qNcJbxu=CF)2W zN~j3Dsyk0(oW3Uhsw}M@fARM6v%9$~=bu^!W1nf|Ntd~9Y!cadY!CJR=E#EL0OR9Y zQwk&ghUH?7_f+E-f-t!X27 zf48-;zmYLxy0?7=Yh63m8xgbUb{7x%oafBRGdj$`am z8Ix7JKQt`jo0#jMT0j!Q@t&ioGW8t*r1cOu-ceWP;90*$vVN&b+Kz`ePkSO&=|9`P1$W<8$cae zk~=(m#`Z=qM&%TuN_7SCFp$q&Bc@R$UUNN?Frcy}p<14qdUtpAxo7k7A+XGt1MrRK z9{ri7&w@m6>#JR!k%~`U-N7{Z0bWSn!toLL;JCi<*x~;ZGue4dE=x60%C+iCFanuj zH&l-eEhzE$Z<^-$jP=v~0e~jCK4a?8tOt?lHf4YCCD{26pD3FXP?SbdAY07*anO#7 zFVNosUmc#YSZMt}AEKIv=va6v*pQ&W=DO4w>H-6B_8$zqEPL#jMmQ(cN`neG>b}_( z0Y{dHMkcqc3|FoVZ1@DX9L;MdQ$KnH2<%=}Iz5mX8-5{J>G1Sl zB$f-F$nn>Y*tub!&8P~XK)3*q-#-93#GUPa$Fn~m69Unp!a3?y(b}N@h(j?wgz@6c z)B&TKFpx&Ji;KPZQgd69x@jR|O#`f6+4vK$FZBA&)pO1~bvRCQP4yUw^k$&QX(V?7 zcLd{)mru4*{NohGmt+M1BH!>jY%ca(mr;W~z4M0f%SfoH{&dept9}kD>ZQ)w|MiUj z40L)yYe0z;;1Hkbwr9YhHfA3EPEs;o*$54F+_H}ORNBWyuhO^%Tn|B zRP^3!VZVpgLg2H`4ZM;RMj`%(cFRI>X=?=ogH#bsQJ{_&=&r?2NxphtZNaOJI{0Zt z+bP`K1;{xNBFn4X?)tawo2r)j`A1>MAl@8s_OMu5y|aah?|y^l_$#`Kv6CII`xV^I zWzxR47Mzt*vJY~lRN9|9su{M@9WhlhJJXLDLq zc(g-1n0xKErW9RfU(_xgw-@?-KLE@=$Fg~o)D#dj&EYR4nt%9rV#g?}FDobsBjGbd z*ShZn*0{NYm)pj^-i8Zypkh@S9JS5XvASl+@9+_Qv)19mujGj;Kh_!<;<5 z@F$m0co4QVc741CCimpH+vdRwU%#H*rR;KHnNJNprqZHm9wtIfu3C3h`Cs&r(?6)$iaJ>=k;5#T)@rv340{EVf za@+e>@;;zN#`0AyWd%`E4nh#5U3xE&|45@iRLvD&FPK*Kw{Tk3Wg70QnjIpt1;ULK z?88?EK$#Xfzha**?Am(QGVO8c6C!(p- zaU5c0=j9`+858Vd|DQMiuVsO#I{sT809qvcBZ20&xB7K&6{nJFCG^LJ+ELe^0ioZpJDdhQpM?U!G|4;ybb{bZFr4NVI{xd?=wO!uefZzD0 zP`5AC-SWveniurM(7(e%oy-3w7-+Ra+|Qihy1uCK^3O}j!MuK88b0y3r({lUYcinP zxg<4<`vF>s`X73zWWZ$+6I9^N2=!5X`a1t$n1Ng>K|nUN5O&?W^A$L1jbNOy2l;D< z$H7I+6u$YKKIJJldXa-_?3b9WXr+4pU>*=z#{RRBR|e_>9ZdkUF^4CsxkWL2p4)oE zb7U}H8np8jBJ@UgEA@OyU%j)VHSkz~pHTi2)IF4E;)&wC^W;@F7^B3tPvhowm5f?o zRdU1f zR;@%PcJ$*Z-KD$xy4P5(`;7r|$wJgWWs$!{gVqZwL5V+NyeeYk<6JMgw|j%fuOXnX zId-OCNu8mRwU*>G@?{#E{ch|J6`@?KCjm43eLlZFK#zk>_R*whptWK&FEWKZfTCP( zg!>A5iDEVF4P4O{POeQ?Xv#6y5uowqbgJfhg}&~6h~#W4T-o9%vj6fijp&>p(WZC< ziy&L~D=EB|e%%QxrwfYjH+CgPE0$QCn=*Pm*1bh5rWB1Bou0-Oi++L^)g1ND*(-y? zFH6Wx2kh0C|GBxc2I5xy2^DR}@5cJ4kH<7h#xJJzC$OX|!9QEmC1L`18t3q96sz0v zME!17zDP#2dpGaMzWdZn;Q^jsGl|>=7iGc3jJ#v)sh#>{f-YrG6U?;n+}N2TuX_Jw zvUNb&!co-p=A=L3a0uZv_ zAB{dnsV&(KX3ZVYRdE>%+Tz%nr0ftWK8Gq9f!@EBU>cj_K?5U(#`vz1NY4u;!N6=; zYGsfI1=g^bRWI8;ktNya#nW!bsO1=2~ zV|gzSS6Xpc=Olc4S--8$9~dFKJ>KB@(R5#nhBv-Z=V#Y|H;Yo_nW&$?YyaM4E0z9? 
zkMm3_Af0}GCGxQsO)dKJZXM#?rlo{DC-I?)uzlkjaTEHm;hh70FZ>cv0)^WeS|Th<4}OC}#OREw||PwqCKi#RVzZhRLT?BvQ}zF5I_ z#i+JT5JP{;Cdol-bf>Yg4O28Toq$ET&HvbL3!-|F4yT2s)_XtG&M7NiP}F(Ccm|qk z8Z?#v_{`7L6Q$iuUyF&2^{`H#j{SCGEy#!0-FBI_-@?IpZu3qgwoHw72#Yw0s75Z# zmm4FN>N+ty8bvDOE<7acv8>^Y2@mTOsvwfyWFFVx`pFA8WyiUPnr!N~sQx{mt@eqO zAncgCOQ;wiT#lvvH`~Hw1`A;~D9XrK6iptJNN8EG%n%ws7ViDB6Lr%#@@{0FowW1h zqYB=;f<~N!>hyk?_Il;ItnF0#u{k-W5XLiH+mNb?Bnf=4D)67?e~^{`1ZU=noJCrDDFOij8aaSz#WEW zlCCu`9Z_C!%?-Q46=l-(o!!LqFH%A0H-^1Cd6SYi=(D~qQvH32!|q>Hm%4OFh&d5K zxQQ{FOlUfCTZ%a)ZfY(seW(5Wtbv#6NA({pP@K^fD{Lr!(9#eTN++%6E zAsyt?N6u=#;Wd?CY(fT3MI91>9-&D-UkI~(k zbdGPCf}(zRWIFGScCbVfQ=P4>tR@%K(uZ?QDztW4cZ{X0rwuwMOiR{XvPg2%!Rduq zELI%9;Id*NwX=5e8gQOpv!vrMz{Gj}ppc55(eve?(`7kdul%loK3sQL`2P5j|Lc}2 z*n-s?UQRZ+rTF1ynbgg8Q@pdD(yjBofY1*)!50$V&)%mdqPuwZUlQI6Fe?VsM?CX9 zo;%7kOQMWusU%jm{&=CknDmB`^|o~*Q#>d^cyD)SuZ}a%kf)lt%|5=1G%TE(qN4Tb zmdObBae!lfFEk7O!Hj^$`+3DKvf-$ox1!k7x$5xHDIy~v!#5vG-zF%Yvh*oZ?pm%; zbA@kkB;zEpHO_`7hH9}NB{P&_6(vfi<WJqWpD+}ryZN35VA}JlQ3I*x0TjABhdT&r$+_7!Q~P}2r_mp&cuq+k z~L%t0TcDr$o4L0SPNw7IY>61675sh*QqmH@h9{@@n zn>~0FnwMr~34x@hQZxUdyg3!W3iA@2lmOsQ_R%yn@%479dlr{-yRVuD>uve$_DA>I^^G#A*gRWo;%#^wmPBCb za)5vWh?PrA(|Ff0X$k&&o;+qi@EGB!n0yH;H6m)62$GJ)%Wv7>S?#qX5RT~rPSrm8 zF(l*yFt=j4>V&+a2_7~_`ggJMdYbuq=dcc!-@T5p9_y3B#6;KriS1J2fyr)6+SX*g=n$O`NF>(CsVjlT$=D_~m|)@>Qha z7Hq2D;(kpHuf^=!-gZ$ve{js}_sE%wq<;O%{3Xm7@gNQg&%}Zpai)8lgEk~2<}TaN z3}qNeEYLboA)$A?czDwnuk@5!>uf)-l{9Ja)-<;nPE5?h*L}`iI=Y^AQewyY_rmL_ zl!zHa%W9z_l+)A~n0d57&-^^c<=}Qgz~rm*%yz>o5q13vBsGR*#s88d%H`F)v=&;( zT?NAyRH_7qd;moE5pv#u?8*Fc1N2i6nLyw^Zb0GhV(dXmnM0Wcy1;Qg-#QhU)nFh( zn7J3ZJW=WbU&W@Uzle-u9eOYqC=U)_!{pTWYyWs@+XQ>NbGV#JkB%H*_0G0Iv2*eK z%=Y4tZ$H`bXrW#qzK4QtpeySAqq4(dJ*Xq1ug~J$Ld9$N+U?FsTDOwSTj_kMIG3GHzx9c7jBM{7hqh4B8z-iGYI z`z0>)q?ND@vb);0OiSBT#woiQBOK#D6c{Dd)^>qkEOC`+8)F06Y*0e z!Scvp!lISQwrTDc{iWRN%vK+@#*S%+?!Gj(rPeS+odYIn;80<>a-GkIudg6rZQoq| zejf-hHty6!c`)Q>IxG;BEqF=JFMLx0<@BgHo1?VJ`?7GmW>`Td!J5@Dp-Yf}E z=#Va#xFH&gS&>50Hyes0blz0Y5Ta&3w3HaULU)Ng__mwkqbc<2Jyk-e_i=kN;xG1x z1_PHYnx8RhZ9YG1o5um;`!U{-w>*?q9XlKWd+!h*ZZRotXe)d%J9MP@zJia6 zS=eYyFrMbDHQRR#5o+*4Iwc)0T)&JHt!kec%GK7L*ZmZsJVKIUNoX{7< ze@ZiK)E7Sdy}e`j#oNTM`$cX?ZbpzYO;%dfCt+@cD2=ws(ZpwMsczuz#MXesfO2U0AuY1-oDjvk6?LT-gcQB*bD7eKmpFDt zW3%#@Ti<^3e;q(b=5((Em7XKv8G6l?Y1X8n#+qBNK>q3hof6d>dzHZ^X0Sak;J0~J z*p|??H6r0Vc-fq&Z()6lPfX%jQ->0w8=-F@$*dGRbji_#xUP_T9G8%4$BM#%-FO}8 z<={^cj+)`gKw9C%mPA7$wVY+#U=$0rn1p--H;{doa> znc{d*AR#DT?(t?Yoj*dK-BXlF;VSW*M z`{kiB`JLYitBPE@^8WNgd9Nl_TDF<3PShnFH@jv8Mw$P1Iknz&WyLrZq(@7zHr2(A z_0U(JVG|hic@|p094-TnwDX%6ofHfIWej<+VRtR#Y&JX*?7^)z)Ft~MYYTao+pluE zwW!Fh*Us;^lL-HWE9s3&avU?;TBWAsWEh{~q3!e;*CN{xsb961dpKrOH z(0;8({XYF^;qOn;N@G|t$;8i8L{{Dh*ibEM7`8~(5o)7q?AOu@s5JU(_?V=UHL8Q) z?|HbUa1ht2?1m#p&&wg1FL9WH%!Rxgp}}*xi9HcaHQI4Blkk4_nl?gjKHEd=Sp*D$&B!zM2dWZ3iS@Fk`J5s>oZ zR2FcHeLVoQK0bkzdy88!UY9?6?Zy6IJN<8XuU@$(Hb=@*TPZ@ZGm2vBjjlY*0jG6( zrq;Zl1cHavXVe#gB>`#(U@yr=52G4fTVj7QK=2zMIB)wWTBg?3vt9$i&$S*~=yJg& zs2Jz*P}}nDMivAQsKlQDB?wvN^E1bgLeveFDgN31=UiRP9w;@Jzn!&VZ+92^qvs_4 zVivCrPm1?nZ)%yo_z=_?fKW4>DSQuh9mXH2!yq;?vvmuK9T$X|!>*6fH{wx;CZ@2b z`*>a+IUh?noG7n)C-+?}a%u^q^#-Ac4Bqd2)ApkPyF-^GW=PE^kXF0Ddv+cbd(K|U z{W8FERTk*MKh58j>($^LGsc&)jg2xa*mAn4NJ2S2WnQ_t__rUZ9vqiGC5ic5et7r< z1TtSmdq%P@Zh<1M9PSAdUaR(H%=wY?Il&=6-+h~V)wP&yNiol#eBECHyf91ZSVE6% zy>ObmO7;NjDXFK@Y>LW&d5!Fq$UXX(*hvIz;e!w)m^*+@>-9H3(hk$bx5HNrwO z2dt{IRtURN4b&_~&NKcqEhMCUE>H=!&a#9{$S6`o#PlWl@r=~GhU8r9gaxlNCs@rJ zY2q4gA*0F91-VO($kPg;3`lMr)7}If70F947yKG`!ckAWjcl?aCu3~H$1vHjx*dUD5gEFym>E;ailn9_#arvg%e zi&2*oHeUcQebbF@43L4)EYUwE&=_qBnhA6RTZB(UB)_?__@G4QraESB$Hbd_FS5Bo 
z9Ec1TtS{Pyg)3CKq;J)(&&yv?rW~Y>6g2_~K(8{@IYrW+vr$elkbboubd;AF=sa4w z(L@IkOHt2`F!RgQpQ`sWogzYeFFdat2FX-SYH`ne~M?lH_P(iU)+@|my`P>Ig7$WADxcfd zT2p38`yqcD^WiY-Mm_6I%?bl9U25HlEFwndr8^@pLKlB;bahjr)!+KmDUkgFS{%<* zth%Ivb#WRsab#!U0=;JhR5*^j@7PPY;rn5Nmi2%PW(GEO#9{iI;7|Xx@p^rM(NdB! zH%1ez_x{We_a8reXC>YTlTg!dO(7vw;016C zh*)I=Eb6W*vqbUjEf!o)(cty_ge7rknm}b<{XQ?raeWTAvih?Tj!(}FG%o#dkUtlHRYt`0y0^67RDZ|=pVVDAlJCLH2P)+Ru*rISk+qNMya5WZV%jf)rnMC# z4Rj!F`Op{#TLZfk=v|&j`Cf$5x|$;TcW$Zs&4cWC`#oRwC~j(gthCrj!}SG-&#nm+ zC-RK%byF*z!ckdJ>(rKxALs+6J3rrn2upkG#oG8RoI)>ZGiYnZ`ujua5IoJ`WD}L#blO*6u!56!m{m0t|Dk?RG=T)*)!S3m>b;1-&2txp%xmLm>5ZBEjTU-*e1t z`LZnA(iq493NPM>3X#ZJdAqYoCRc}}%I8v17alLd_f3XoZG(%PyC2Bp?D+)W1YHKo z{@`ZNY<982A+_`Ad^&F9rvGCNwR@A9G+%0NnwrEXtggtR91iG%P9h#T()&s7?KO(` zBbmDH#%uz1RrY!DaE(f0L+8%^-4;zoMH}uuhy?e}0f@bw6EU;U5Gwzcu(Nus&Ck2( z)+6)-1gX-Is?sAs^In z8xD}XvmymUuaQIWN|w)y;2j^k=X?yb*2(9o9M0Rm*Jm=6t$zIt{Y5I5eh~EQ$L77R zRmRs>V7f4kR%_42s~W@YPOk+;13+990VjbP_>b7BhM;G=x#yWdd`vI)GMbq++x@$d zu9x6rJ0|NP=B!QY3f>Lo+s`I5ukbaa9U$)ruK6)V4hJAXY(yv&DGp0al$tt!;fkG9 zYODCV4o*{C`ddH!jXpN*cn`dbEwm?YIEyi>i-acOm|ZsTP%+h1c2#Q8hO+AEsI zPnf~7k5{8hXx6-w8pFD@G~snUcmrk)b6{V>-T|TS3bj_I-!mrMsZwVqI7j5p zD%(rWvsWt5a#dcjLk}^Zg#y9AT2bxF>)qXQOfJy3sQ(-tKQHMci@st-AN5uQ?x5)<3)*S}ut+v; zruh?)Z$CliRrPcmJ(^y;1xU#YC%!SAz1(;H0vvl6`VLGRWE={6GW;3|v^N?zprOWf``XEY$H&vmgZ___oAGSHJt!hQSactE+tx;ppQ50P1J|i9`EAo8JG_g4u+{d zsam7&yz&NF3x67h1a`idYl8Q$o$l26W02ldYDoyMCcpQeI<<0d>~tKcw|~sEJz2g| zJlObE@##~uR#<2^XjDP1X&1l!w!KVN`i_+Apwq9n9X7R!Cu7btFF*TFaayl(%V*5T z*ytczPs-v0>UO#2@Wo#QEh}1bR$OgQjvT##r{JFV<~k zs9P}ZtT-=7GTCtapZ2ai9LlzDPhWb{+nbUtgUBevQ`wDSq%x$T#!eG8_PsGC*?X!t z3_~eJ%}|CA(J0H5tr+Vxnq+G%F?Ja-A#2|?J>UBt-*SBay??ylaeVh*j@y0QT-SYF zzvcX$zwbfhuwMg{`Pk@7w=A@8$IGS38@JG-8S zvoA=&>C!Uy=sdX7^@kKhO=HXW>5uggv}3Sj-_N!`eoL*)$K(R&BS)p+TUzA99a^+I zDyaFAI>Lp*wvsJquz!73*t`&s*UzsJI@Vh=KnuL{xRClvu%RNN#v&Sv#stOF@V^Xu zT1(>iLO}=+!;R6CI)NR|6b=aVyum@%stVO<3dnFKZa5`nry|P;EDr`orLCg`SVBzd z80wC&IUP;2R=!VDUK{Q%MT6WT*SMUbSrH-N;@8UjIQ0W)N!a)PCHx*F&Rm?y|S z86$x_d(Ay2C|BH5_(8x*-Uv;I0N{){ybt(k*$7P|ShwRG$YSBbKbJXKZgTAME0B}D zV!ib`mUy-)!RfJ&i!Fy2wbAWaFY3QdTFuA7B zYl9s)h5$RgcP305OAzeZ0k$aMUtdcjp>8jR7lR~%Ui+>pwc2z}&?F2>c&#Fwk!4ke?ItM*-d}mI|l_5_v?DB`-`3ge*T|q6mavO|3bXK5Tw4A#sz$>5ui15 zl9uP>HCnFp9GEAUB` zpVtuvZnif^yuC#}Pa0B;@$~U;0+bb70NV=-9*%6r5$pGWPJ@l$*HJ&cE(83hBeM~n zA@fmZJ68NaH!<|cn>nW9Q|g;K?zee9|8IpLkopyLWG`mD0dd?%VSq?|x+Afp|vOIO-biXkBuxR zc2fb9Ougbj#X&Z=$ov`uy!eruDde~d@J+H{kFTKaM9QR7yVY{UbLsv2@MUBre6{&W zVTA4IUU&F;E#DR7qaTJxehNSh&nb3)&?=n1?%4z-1KS{siFv}PKp!m5$%@#cH)u$%qF5z@$cG(4qOu4=C^&DszDsLt& z28e8dJaiB_kuF&BW-9=tn4S#M)!*GoX0dXcq3|A`a98P|HOjqCO_mb)Njf-7v>g(e^P!_OIO0tVr0=cMz#QYS>3 zK@z(NsFa1P#5M(0&#;EY80Sed@mFo+>&m(3z$hDZM`@FlIQ1WECTS=!A>Lun zgrCxJFu*Yi+w4Rd87x4DxP1j#<&#VMa%%d$jPt!O#gq^x+=0?*Q_<&@Fma!N z7h_gc?35q^$akWhoRU5HJ$#Kiv`=sE%Pg$kLHDDLW%z-<$^M$v>WwD@Lr@|+%iO2= zsv>>U@RqQ>-)?9dJ=s>!!V})7V!V12`jpCEq znOGzn5WG55n?n>K>*x(~O6dIqci{T z#M7gx{W5k38+EryIP5D_eTr0jHi`~KOegm20Gn6a`axIEb1Q`VkRA4nzSyz&9>n@) zJZdSZW{hCL+%qw|_^w@(Jw*!(!kFhRM6w6AT(beC7;mbhE-4xN;r$NEsWMR(G^2OA%~ueeEZ%xgXT4pdN#r)CR$Udx%~6bI;#l^D^kFUd+P zy#bPL-sNPafZ*3009#(f;`s|8*0SzhQQv+Wzp33vSejm-7uCIbX6Qpoal*UnNZB=Y zzF8j`@yW`*oHLui_J&`CYH1T21=gv@=!E0!Knl%wwEEgQzlIh>Q7DsYC^KAKX3&Y9NQ z5$HPVkPn$68TG|`htJ)F>kO!+wKw0VV^rL~H|v8~)XqYWq}wU_(U;xqE0TEZchv4~ zWXRA?Q_Xjb#Ot&E^pN~A1@fyRaTOp@4HDB=Lb(+~@r%f=7Xmw?MT22>_E$zEaitej zM?pOWXqF70D{dvJlaqJAnlbmdruhAiCPyy?x@Y}GiESVZZxpyY0w-?lmRZwd9~DGg zD#+Ro8;?$Ar*}!{nUw*6qyzH2gmg33I14zM#fq@Il^Pd;!ba#)Ry3ZbKhsJP5ze_T!mj;qR4|{&b z1BmyXsvo;JYI|YdpoPJbnZ+owa#tzB!7fNL5q?mZ#*)sj?_XwmFez%hukmGt!6Jzr 
zhsHl9pRdk04%LE2=Uk$>a?~(>B8lI$-&Hyzvt$JIqgmZuyADbudU+rgYBx3|qDV1v zrA{>p3~`*YSAdB6euVftr$<(>gl~HxzK$5PoV}sDoWvu__}UCjZ`L+sx*!>>4;OoM zB5{JGZs#!27og05VTq3&jVl+T(A&-p6c$UETQI6kaVmb4H_a32`ArGS)D8sfcCqvm zT!O~|BdoN5aTH!BDg*1SjT2nvxoZLFD#-vtU>(K}3ponSU}kpC`d0g1x!0F5 zVc;vd18V;C2(+jvKqRp#9tDgR&8U$XYh3wST&2x?wl~&&{;D`ypKiL~LymP$Q(T-p`3e&fS2@M>&bwZhJBtL#RKzj`N5_~ilicIQF=cBqZICA7b8{C!> zGEV5qpwV3xyJff%3cQ@V{b6i+uN7Q3`7wjT;JxPFLL^D2``b`%ICXG)3v~RFby+

diff --git a/src/pages/docs/ai-transport/features/sessions-identity/identifying-users-and-agents.mdx b/src/pages/docs/ai-transport/features/sessions-identity/identifying-users-and-agents.mdx
new file mode 100644
index 0000000000..4614747840
--- /dev/null
+++ b/src/pages/docs/ai-transport/features/sessions-identity/identifying-users-and-agents.mdx
@@ -0,0 +1,419 @@
---
title: "Identifying users and agents"
meta_description: "Establish trusted identity and roles in decoupled AI sessions"
meta_keywords: "user authentication, agent identity, JWT authentication, token authentication, verified identity, capabilities, authorization, user claims, RBAC, role-based access control, API key authentication, message attribution"
---

Secure AI applications require agents to trust who sent each message and understand what that sender is authorized to do. Ably's identity system uses token-based authentication to provide cryptographically verified identities with custom attributes that you can access throughout your applications.

## Why identity matters

In decoupled architectures, identity serves several critical purposes:

- Prevent spoofing: Without verified identity, malicious users could impersonate others by claiming to be someone else. Ably supports cryptographically binding each client's identity to their credentials, making spoofing impossible.
- Message attribution: Agents need to know whether messages come from users or other agents. This is essential for conversation flows in which agent responses should be securely distinguished from user prompts.
- Personalized behavior: Different users may have different privileges or attributes. A premium user might get access to more capable models, while a free user gets basic functionality. Ably allows your trusted authentication server to embed these attributes in the client's credentials, so they can be passed to agents securely.
- Authorization decisions: Some operations should only be performed for specific users. For example, human-in-the-loop (HITL) tool calls that access sensitive data might require admin privileges. Ably allows agents to verify the privilege level and role of the user resolving the tool call.

## Authenticating users

Use [token authentication](/docs/auth/token) to authenticate users securely. Your authentication server generates a token that is signed with the secret part of your Ably API key. Clients use this token to connect to Ably, and the token signature ensures it cannot be tampered with.

The following examples use [JWT authentication](/docs/auth/token#jwt) for its simplicity and standard tooling support. For other approaches, see [token authentication](/docs/auth/token).

Create a server endpoint that generates signed JWTs after verifying user authentication:

```javascript
// Server code
import express from "express";
import jwt from "jsonwebtoken";

const app = express();

// Mock authentication middleware.
// This should be replaced with your actual authentication logic.
function authenticateUser(req, res, next) {
  // Assign a mock user ID for demonstration
  req.session = { userId: "user123" };
  next();
}

// Return the claims payload to embed in the signed JWT.
function getJWTClaims(userId) {
  // Returns an empty payload, so the token
  // inherits the capabilities of the signing key.
  return {};
}

// Define an auth endpoint used by the client to obtain a signed JWT
// which it can use to authenticate with the Ably service.
app.get("/api/auth/token", authenticateUser, (req, res) => {
  const [keyName, keySecret] = "{{API_KEY}}".split(":");

  // Sign a JWT using the secret part of the Ably API key.
  const token = jwt.sign(getJWTClaims(req.session.userId), keySecret, {
    algorithm: "HS256",
    keyid: keyName,
    expiresIn: "1h",
  });

  res.type("application/jwt").send(token);
});

app.listen(3001);
```

The JWT is signed with the secret part of your Ably API key using [HMAC-SHA-256](https://datatracker.ietf.org/doc/html/rfc4868). This example does not embed any claims in the JWT payload, so by default the token inherits the capabilities of the Ably API key used to sign the token.
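
A quick way to see this is to decode a token returned by the endpoint. The sketch below is illustrative rather than part of the application: it assumes the server above is running locally on port 3001, and uses `jwt.decode`, which reads the token without verifying its signature:

```javascript
// Sanity-check sketch: fetch a token from the endpoint above
// and inspect its header and claims (no signature verification).
import jwt from "jsonwebtoken";

const response = await fetch("http://localhost:3001/api/auth/token");
const token = await response.text();

const { header, payload } = jwt.decode(token, { complete: true });
console.log(header);  // { alg: 'HS256', typ: 'JWT', kid: '<your key name>' }
console.log(payload); // { iat: ..., exp: ... } and no custom claims
```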

+
+The JWT is signed with the secret part of your Ably API key using [HMAC-SHA-256](https://datatracker.ietf.org/doc/html/rfc4868). This example does not embed any claims in the JWT payload, so by default the token inherits the capabilities of the Ably API key used to sign the token.
+
+Configure your client to obtain a signed JWT from your server endpoint using an [`authCallback`](/docs/auth/token#auth-callback). The client obtains a signed JWT from the callback and uses it to authenticate requests to Ably. The client automatically requests a new token before the current one expires.
+
+```javascript
+// Client code
+import * as Ably from "ably";
+
+const ably = new Ably.Realtime({
+  authCallback: async (tokenParams, callback) => {
+    try {
+      const response = await fetch("/api/auth/token");
+      const token = await response.text();
+      callback(null, token);
+    } catch (error) {
+      callback(error, null);
+    }
+  }
+});
+
+ably.connection.on("connected", () => {
+  console.log("Connected to Ably");
+});
+```
+
+## Authenticating agents
+
+Agents typically run on servers in trusted environments where API keys can be securely stored. Use [API key authentication](/docs/auth#basic-authentication) to authenticate agents directly with Ably.
+
+```javascript
+// Agent code
+import * as Ably from "ably";
+
+const ably = new Ably.Realtime({
+  key: "{{API_KEY}}"
+});
+
+ably.connection.on("connected", () => {
+  console.log("Connected to Ably");
+});
+```
+
+## Specifying capabilities
+
+Use [capabilities](/docs/auth/capabilities) to specify which operations clients can perform on which channels. This applies to both users and agents, allowing you to enforce fine-grained permissions.
+
+### User capabilities
+
+Add the [`x-ably-capability`](/docs/api/realtime-sdk/authentication#ably-jwt) claim to your JWT to specify the allowed capabilities of a client, such as restricting some users to only subscribe to messages while allowing others to publish.
+
+Update your `getJWTClaims` function to specify the allowed capabilities for the authenticated user:
+
+```javascript
+// Server code
+
+// Return the claims payload to embed in the signed JWT.
+// Includes the `x-ably-capability` claim, which controls
+// which operations the user can perform on which channels.
+function getJWTClaims(userId) {
+  const orgId = "acme"; // Mock organization ID for demonstration
+  const capabilities = {
+    // The user can publish and subscribe to channels within the organization,
+    // that is, any channel matching `org:acme:*`.
+    [`org:${orgId}:*`]: ["publish", "subscribe"],
+    // The user can only subscribe to the `announcements` channel.
+    announcements: ["subscribe"],
+  };
+  return {
+    "x-ably-capability": JSON.stringify(capabilities),
+  };
+}
+```
+
+When a client authenticates with this token, Ably enforces these capabilities server-side. Any attempt to perform unauthorized operations will be rejected.
+For example, a client with the capabilities above can publish to channels prefixed with `org:acme:`, but an attempt to publish to a channel prefixed with `org:foobar:` will fail with error code [`40160`](/docs/platform/errors/codes#40160):
+
+```javascript
+// Client code
+const acmeChannel = ably.channels.get("org:acme:{{RANDOM_CHANNEL_NAME}}");
+await acmeChannel.publish("prompt", "What is the weather like today?"); // succeeds
+
+const foobarChannel = ably.channels.get("org:foobar:{{RANDOM_CHANNEL_NAME}}");
+await foobarChannel.publish("prompt", "What is the weather like today?"); // fails
+
+const announcementsChannel = ably.channels.get("announcements");
+await announcementsChannel.publish("prompt", "What is the weather like today?"); // fails
+await announcementsChannel.subscribe((msg) => console.log(msg)); // succeeds
+```
+
+### Agent capabilities
+
+When using API key authentication, provision API keys through the [Ably dashboard](https://ably.com/dashboard) or [Control API](/docs/account/control-api) with only the capabilities required by the agent.
+
+The following example uses the Control API to create an API key with specific capabilities for a weather agent:
+
+```shell
+curl --location --request POST 'https://control.ably.net/v1/apps/{{APP_ID}}/keys' \
+--header 'Content-Type: application/json' \
+--header "Authorization: Bearer ${ACCESS_TOKEN}" \
+--data-raw '{
+  "name": "weather-agent-key",
+  "capability": {
+    "org:acme:weather:*": ["publish", "subscribe"]
+  }
+}'
+```
+
+This creates an API key that can only publish and subscribe on channels matching `org:acme:weather:*`. The agent can then use this key to authenticate:
+
+```javascript
+// Agent code
+const weatherChannel = ably.channels.get("org:acme:weather:{{RANDOM_CHANNEL_NAME}}");
+await weatherChannel.subscribe((msg) => console.log(msg)); // succeeds
+await weatherChannel.publish("update", "It's raining in London"); // succeeds
+
+const otherChannel = ably.channels.get("org:acme:other:{{RANDOM_CHANNEL_NAME}}");
+await otherChannel.subscribe((msg) => console.log(msg)); // fails
+await otherChannel.publish("update", "It's raining in London"); // fails
+```
+
+## Establishing verified identity
+
+Use the [`clientId`](/docs/messages#properties) to identify the user or agent that published a message. The method for setting `clientId` depends on your authentication approach:
+
+- When using [basic authentication](/docs/auth/identified-clients#basic), specify the `clientId` directly in the client options when instantiating the client instance.
+- When using [token authentication](/docs/auth/identified-clients#token), specify an explicit `clientId` when issuing the token.
+
+### User identity
+
+Users typically authenticate using [token authentication](/docs/auth/identified-clients#token). Add the [`x-ably-clientId`](/docs/api/realtime-sdk/authentication#ably-jwt) claim to your JWT to establish a verified identity for each user client. This identity appears as the [`clientId`](/docs/messages#properties) in all messages the user publishes, and subscribers can trust this identity because only your server can issue JWTs with specific `clientId` values.
+
+Update your `getJWTClaims` function to specify a `clientId` for the user:
+
+```javascript
+// Return the claims payload to embed in the signed JWT.
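+// Note: a token carrying `x-ably-clientId` binds the connection to that
+// identity; attempts to publish under any other clientId are rejected by Ably.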
+function getJWTClaims(userId) { + // Returns a payload with the `x-ably-clientId` claim, which ensures + // that the user's ID appears as the `clientId` on all messages + // published by the client using this token. + return { "x-ably-clientId": userId }; +} +``` + + +When a client authenticates using this token, Ably's servers automatically attach the `clientId` specified in the token to every message the user publishes: + + +```javascript +// Client code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +// Publish a message - the clientId is automatically attached +await channel.publish("prompt", "What is the weather like today?"); +``` + + +Agents can then access this verified identity to identify the sender: + + +```javascript +// Agent code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +// Subscribe to messages from clients +await channel.subscribe("prompt", (message) => { + // Access the verified clientId from the message + const userId = message.clientId; + const prompt = message.data; + + console.log(`Received message from user: ${userId}`); + console.log(`Prompt:`, prompt); +}); +``` + + +The `clientId` in the message can be trusted, so agents can use this identity to make decisions about what actions the user can take. For example, agents can check user permissions before executing tool calls, route messages to appropriate AI models based on subscription tiers, or maintain per-user conversation history and context. + +### Agent identity + +Agent code typically runs in a trusted environment, so you can use [basic authentication](/docs/auth/identified-clients#basic) and directly specify the `clientId` when instantiating the agent client. This identity appears as the [`clientId`](/docs/messages#properties) in all messages the agent publishes, allowing subscribers to identify the agent which published a message. + + +```javascript +// Agent code +import * as Ably from "ably"; + +const ably = new Ably.Realtime({ + key: "{{API_KEY}}", + // Specify an identity for this agent + clientId: "weather-agent" +}); +``` + + +When subscribers receive messages, they can use the `clientId` to determine which agent published the message: + + +```javascript +// Client code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +await channel.subscribe((message) => { + if (message.clientId === "weather-agent") { + console.log("Weather agent response:", message.data); + } +}); +``` + + + + +## Adding roles and attributes + +Embed custom roles and attributes in messages to enable role-based access control (RBAC) and convey additional context about users and agents. This enables agents to make authorization decisions without additional database lookups. + +### User claims + +Use [authenticated claims for users](/docs/auth/capabilities#custom-restrictions-on-channels-) to embed custom claims in JWTs that represent user roles or attributes. + +Add claims with names matching the `ably.channel.*` pattern to your JWT to specify user claims for specific channels. Claims can be scoped to individual channels or to [namespaces](/docs/channels#namespaces) of channels. The most specific user claim matching the channel is automatically included under `extras.userClaim` in all messages the client publishes. + +Update your `getJWTClaims` function to specify some user claims: + + +```javascript +// Return the claims payload to embed in the signed JWT. 
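+// Note: when more than one `ably.channel.*` claim matches a channel name,
+// Ably applies the most specific match, and that value is what subscribers
+// receive in `message.extras.userClaim`.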
+function getJWTClaims(userId) { + // Returns a payload with `ably.channel.*` claims, which ensures that + // the most specific claim appears as the `message.extras.userClaim` + // on all messages published by the client using this token. + return { + // The user is an editor on all acme channels. + "ably.channel.org:acme:*": "editor", + // The user is a guest on all other channels. + "ably.channel.*": "guest", + }; +} +``` + + +When a client authenticates with a JWT containing `ably.channel.*` claims, Ably automatically includes the most specific matching claim value in the `message.extras.userClaim` field on messages published by the client: + + +```javascript +// Agent code +const channel = ably.channels.get("org:acme:{{RANDOM_CHANNEL_NAME}}"); + +// Subscribe to user prompts +await channel.subscribe("prompt", async (message) => { + // Access the user's role from the user claim in message extras + const role = message.extras?.userClaim; + + console.log(`Message from user with role: ${role}`); +}); +``` + + +The `message.extras.userClaim` in the message can be trusted, so agents can rely on this information to make decisions about what actions the user can take. For example, an agent could allow users with an "editor" role to execute tool calls that modify documents, while restricting users with a "guest" role to read-only operations. + +### Agent metadata + +Use [`message.extras.headers`](/docs/api/realtime-sdk/types#extras) to include custom metadata in agent messages, such as agent roles or attributes. + +Agents can directly specify metadata in `message.extras.headers`. Since agents run as trusted code in server environments, this metadata can be trusted by subscribers. This is useful for communicating agent characteristics, such as which model the agent uses, the agent's role in a multi-agent system, or version information. + + + + +```javascript +// Agent code +import * as Ably from "ably"; + +const ably = new Ably.Realtime({ + key: "{{API_KEY}}", + clientId: "weather-agent" +}); + +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +await channel.publish({ + name: "update", + data: "It's raining in London", + extras: { + headers: { + model: "gpt-4" + } + } +}); +``` + + +Clients and other agents can access this metadata when messages are received: + + +```javascript +// Client code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +await channel.subscribe((message) => { + if (message.clientId === "weather-agent") { + const model = message.extras?.headers?.model; + console.log(`Response from weather agent using ${model}:`, message.data); + } +}); +``` + diff --git a/src/pages/docs/ai-transport/features/sessions-identity/index.mdx b/src/pages/docs/ai-transport/features/sessions-identity/index.mdx new file mode 100644 index 0000000000..e1b9be5a05 --- /dev/null +++ b/src/pages/docs/ai-transport/features/sessions-identity/index.mdx @@ -0,0 +1,63 @@ +--- +title: "Sessions & identity overview" +meta_description: "Manage session lifecycle and identity in decoupled AI architectures" +meta_keywords: "AI sessions, session management, channel-oriented sessions, connection-oriented sessions, session persistence, session lifecycle, identity management, decoupled architecture, session resumption, multi-device, multi-user" +--- + +Ably AI Transport provides robust session management and identity capabilities designed for modern AI applications. Sessions persist beyond individual connections, enabling agents and clients to connect independently through shared channels. 
Built-in token-based authentication provides verified user identity and fine-grained authorization for channel operations. + +## What is a session? + +A session is an interaction between a user (or multiple users) and an AI agent where messages and data are exchanged, building up shared context over time. In AI Transport, sessions are designed to persist beyond the boundaries of individual connections, enabling modern AI experiences where users expect to: + +- Resume conversations across devices: Start a conversation on mobile and seamlessly continue on desktop with full context preserved +- Return to long-running work: Close the browser while agents continue processing in the background, delivering results when you return +- Recover from interruptions: Experience connection drops, browser refreshes, or network instability without losing conversation progress +- Collaborate in shared sessions: Multiple users can participate in the same conversation simultaneously and remain in sync + +These capabilities represent a fundamental shift from traditional request/response AI experiences to continuous, resumable interactions that remain accessible across all user devices and locations. Sessions have a lifecycle: they begin when a user starts interacting with an agent, remain active while the interaction continues, and can persist even when users disconnect - enabling truly asynchronous AI workflows. + +Managing this lifecycle in AI Transport's decoupled architecture involves detecting when users are present, deciding when to stop or continue agent work, and handling scenarios where users disconnect and return. + +## Connection-oriented vs channel-oriented sessions + +In traditional connection-oriented architectures, sessions are bound to the lifecycle of a WebSocket or SSE connection: + +1. Client opens connection to agent server to establish a session +2. Agent streams response over the connection +3. When the connection closes, the session ends + +This tight coupling means network interruptions terminate sessions, agents cannot continue work after disconnections, and supporting multiple devices or users introduces significant complexity. + +AI Transport uses a channel-oriented model where sessions persist independently of individual connections. Clients and agents communicate through [Channels](/docs/channels): + +1. Client sends a single request to agent server to establish a session +2. Server responds with a unique ID for the session, which is used to identify the channel +3. All further communication happens over the channel + +In this model, sessions are associated with the channel, enabling seamless reconnection, background agent work, and multi-device access without additional complexity. + +![AI Transport architecture](../../../../../images/content/diagrams/ai-transport-before-and-after.png) + + +The channel-oriented model provides key benefits for modern AI applications: sessions maintain continuity in the face of disconnections, users can refresh or navigate back to the ongoing session, multiple users or devices can participate in the same session, and agents can continue long-running or asynchronous workloads even when clients disconnect. + +The following table compares how each architecture addresses the engineering challenges of delivering these capabilities: + +| Challenge | Connection-oriented sessions | Channel-oriented sessions | +|-----------|------------------------------|---------------------------| +| Routing | Agents must track which instance holds each session. 
Reconnecting clients need routing logic to find the correct agent instance across your infrastructure. | Agents and clients only need the channel name. Ably handles message delivery to all subscribers without agents tracking sessions or implementing routing logic. | +| Message resume | Agents must buffer sent messages and implement replay logic. When clients reconnect, agents must determine what was missed and retransmit without duplicates or gaps, distinctly for each connection. | When clients reconnect, they automatically receive messages published while disconnected. The channel maintains history without agents implementing buffering or replay logic, eliminating the need for server-side session state. | +| Abandonment detection | Agents must implement logic to distinguish between brief network interruptions and users who have actually left, so they can decide whether to continue work or clean up resources. | Built-in presence tracking signals when users enter and leave channels, providing clear lifecycle events to agents without custom detection logic. | +| Multi-user and multi-device | Agents must manage multiple concurrent connections from the same user across devices, or from multiple users in collaborative sessions. This requires tracking connections, synchronizing state, and ensuring all participants receive consistent updates. | Multiple users and devices can connect to the same channel. The channel handles message delivery to all participants, simplifying agent logic for multi-user and multi-device scenarios. | + +## Identity in channel-oriented sessions + +In connection-oriented architectures, the agent server handles authentication directly when establishing the connection. When the connection is opened, the server verifies credentials and associates the authenticated user identity with that specific connection. + +In channel-oriented sessions, agents don't manage connections or handle authentication directly. Instead, your server authenticates users and issues tokens that control their access to channels. 
Ably enforces these authorization rules and provides verified identity information to agents, giving you powerful capabilities for managing who can participate in sessions and what they can do: + +- Verified identity: Agents automatically receive the authenticated identity of message senders, with cryptographic guarantees that identities cannot be spoofed +- Fine-grained authorization: Control precisely what operations each user can perform on specific channels through fine-grained capabilities +- Rich user attributes: Pass authenticated user data to agents for personalized behavior without building custom token systems +- Role-based participation: Distinguish between different types of participants, such as users and agents, to customize behaviour based on their role diff --git a/src/pages/docs/ai-transport/features/sessions-identity/online-status.mdx b/src/pages/docs/ai-transport/features/sessions-identity/online-status.mdx new file mode 100644 index 0000000000..7a852d482f --- /dev/null +++ b/src/pages/docs/ai-transport/features/sessions-identity/online-status.mdx @@ -0,0 +1,280 @@ +--- +title: "Online status" +meta_description: "Use Ably Presence to show which users and agents are currently connected to an AI session" +meta_keywords: "presence, online status, multi-device, multi-user, session abandonment, async workflows" +--- + +Modern AI applications require agents to know when users are online, when they've fully disconnected, and how to handle users connected across multiple devices. Ably's [Presence](/docs/presence-occupancy/presence) feature provides realtime online status with automatic lifecycle management, allowing agents to decide when to continue processing, when to wait for user input, and when to clean up resources. Presence detects which users and agents are currently connected to a session, distinguishes between a single device disconnecting and a user going completely offline, and enables responsive online/offline indicators. + +## Why online status matters + +In channel-oriented sessions, online status serves several critical purposes: + +- Session abandonment detection: Agents need to know when users have fully disconnected to decide whether to continue processing, pause work, or clean up resources. Presence provides reliable signals when all of a user's devices have left the session. +- Multi-device coordination: A single user can connect from multiple devices simultaneously. Presence tracks each connection separately while maintaining stable identity across devices, allowing you to distinguish between "one device left" and "user completely offline". +- Agent availability signaling: Clients need to know when agents are online and ready to process requests. Agents can enter presence to advertise availability and leave when they complete work or shut down. +- Collaborative session awareness: In sessions with multiple users, participants can see who else is currently present. This enables realtime collaboration features and helps users understand the current session context. + +## Going online + +Use the [`enter()`](/docs/presence-occupancy/presence#enter) method to signal that a user or agent is online. When a client enters presence, they are added to the presence set and identified by their `clientId`. You can optionally include data when entering presence to communicate additional context. + + + +You have flexibility in when to enter presence. 
For example, an agent might choose to appear as online only while processing a specific task, or remain present for the duration of the entire session. Users typically enter presence when they connect to a session and remain present until they disconnect. + + + +For example, a user client can enter presence when joining a session: + + +```javascript +// Client code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +// Enter presence with metadata about the user's device +await channel.presence.enter({ + device: "mobile", + platform: "ios" +}); +``` + + +Similarly, an agent can enter presence to signal that it's online: + + +```javascript +// Agent code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +// Enter presence with metadata about the agent +await channel.presence.enter({ + model: "gpt-4" +}); +``` + + +### Going online from multiple devices + +A single user can be present on a channel from multiple devices simultaneously. Ably tracks each connection separately using a unique [`connectionId`](/docs/connect#connection-ids), while maintaining the same [`clientId`](/docs/auth/identified-clients#assign) across all connections. + +When a user connects from multiple devices, each device enters presence independently. All connections share the same `clientId` but have different `connectionId` values. + +For example, when the user connects from their desktop browser: + + +```javascript +// Client code (device 1: desktop browser) +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); +await channel.presence.enter({ device: "desktop" }); +``` + + +And then connects from their mobile app while still connected on desktop: + + +```javascript +// Client code (device 2: mobile app) +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); +await channel.presence.enter({ device: "mobile" }); +``` + + +Both devices are now members of the presence set with the same `clientId` but different `connectionId` values. When you query the presence set, you'll see two separate entries: + + +```javascript +// Query presence to see both devices +const members = await channel.presence.get(); +for (const { clientId, connectionId, data } of members) { + console.log(clientId, connectionId, data); +} +// Example output: +// user-123 hd67s4!abcdef-0 { device: "desktop" } +// user-123 hd67s4!ghijkl-1 { device: "mobile" } +``` + + +When either device leaves or disconnects, the other device remains in the presence set. + +## Going offline + +Clients can go offline in two ways: explicitly by calling the leave method, or automatically when Ably detects a disconnection. + +### Explicitly going offline + +Use the [`leave()`](/docs/presence-occupancy/presence#leave) method when a user or agent wants to mark themselves as offline. This immediately notifies presence subscribers on the channel and removes the entry from the presence set, even if they remain connected to Ably. 
+ + + +For example, a user client can explicitly leave presence: + + +```javascript +// Client code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +// Leave presence when the user marks themselves offline +await channel.presence.leave(); +``` + + +Similarly, an agent can leave presence when it completes its work or shuts down: + + +```javascript +// Agent code +const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}"); + +// Leave presence when the agent shuts down +await channel.presence.leave(); +``` + + +Optionally include data when leaving presence to communicate the reason for going offline. This data is delivered to presence subscribers listening to `leave` events and is also available in [presence history](/docs/presence-occupancy/presence#history): + + +```javascript +// Leave with a reason +await channel.presence.leave({ + reason: "session-completed", + timestamp: Date.now() +}); +``` + + +Subscribers receive the `leave` data in the presence message: + + +```javascript +// Subscribe to leave events to see why members left +await channel.presence.subscribe("leave", (presenceMessage) => { + console.log(`${presenceMessage.clientId} left`); + if (presenceMessage.data) { + console.log(`Reason: ${presenceMessage.data.reason}`); + } +}); +``` + + +### Going offline after disconnection + +When a client loses connection unexpectedly, Ably detects the lost connection and automatically leaves the client from the presence set. + +By default, clients remain present for 15 seconds after an abrupt disconnection. This prevents excessive enter/leave events during brief network interruptions. If the client reconnects within this window, they remain in the presence set without triggering leave and reenter events. + +Use the `transportParams` [client option](/docs/api/realtime-sdk#client-options) to configure disconnection detection and presence lifecycle behaviour. After an abrupt disconnection, the `heartbeatInterval` transport parameter controls how quickly Ably detects the dead connection, while the `remainPresentFor` option controls how long the member is kept in presence before Ably emits the leave event. + + + +For example, if implementing resumable agents using techniques such as durable execution, configure a longer `remainPresentFor` period to allow time for the new agent instance to come online and resume processing before the previous instance appears as offline. This provides a seamless handoff: + + +```javascript +// Agent code +const ably = new Ably.Realtime({ + key: "{{API_KEY}}", + clientId: "weather-agent", + // Allow 30 seconds for agent resume and reconnection + transportParams: { + remainPresentFor: 30000 + } +}); +``` + + +## Viewing who is online + +Participants in a session can query the current presence set or subscribe to presence events to see who else is online and react to changes in realtime. Users might want to see which agents are processing work, while agents might want to detect when specific users are offline to pause or cancel work. + + + +### Retrieving current presence members + +Use [`presence.get()`](/docs/api/realtime-sdk/presence#get) to retrieve the current list of users and agents in the session. Each presence member is uniquely identified by the combination of their `clientId` and `connectionId`. This is useful for showing who is currently available or checking if a specific participant is online before taking action. 
+
+```javascript
+// Get all currently present members
+const members = await channel.presence.get();
+
+// Display each member - the same user will appear once per distinct connection
+members.forEach((member) => {
+  console.log(`${member.clientId} (connection: ${member.connectionId})`);
+});
+```
+
+### Subscribing to presence changes
+
+Use [`presence.subscribe()`](/docs/api/realtime-sdk/presence#subscribe) to receive realtime notifications when users or agents enter or leave the session. This enables building responsive UIs that show online users, or implementing agent logic that reacts to user connectivity changes.
+
+```javascript
+// Client code
+const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}");
+
+// Subscribe to changes to the presence set
+await channel.presence.subscribe(async (presenceMessage) => {
+  // Get the current synced presence set after any change
+  const members = await channel.presence.get();
+
+  // Display each member - the same user will appear once per distinct connection
+  members.forEach((member) => {
+    console.log(`${member.clientId} (connection: ${member.connectionId})`);
+  });
+});
+```
+
+You can also subscribe to specific presence events:
+
+```javascript
+// Subscribe only to enter events
+await channel.presence.subscribe("enter", (presenceMessage) => {
+  console.log(`${presenceMessage.clientId} joined on connection ${presenceMessage.connectionId}`);
+});
+
+// Subscribe only to leave events
+await channel.presence.subscribe("leave", (presenceMessage) => {
+  console.log(`${presenceMessage.clientId} left on connection ${presenceMessage.connectionId}`);
+});
+```
+
+### Detecting when a user is offline on all devices
+
+Agents can monitor presence changes to detect when a specific user has gone completely offline across all devices. This is useful for deciding whether to pause expensive operations, cancel ongoing work, deprioritize tasks, or schedule work for later.
+
+```javascript
+// Agent code
+const channel = ably.channels.get("{{RANDOM_CHANNEL_NAME}}");
+
+await channel.presence.subscribe(async (presenceMessage) => {
+  // Get the current synced presence set
+  const members = await channel.presence.get();
+
+  // Check if all clients are offline
+  if (members.length === 0) {
+    console.log(`All clients are offline`);
+  }
+
+  // Check if a specific client is offline
+  if (!members.map(m => m.clientId).includes(targetUserId)) {
+    console.log(`${targetUserId} is now offline on all devices`);
+  }
+});
+```
diff --git a/src/pages/docs/ai-transport/features/sessions-identity/resuming-sessions.mdx b/src/pages/docs/ai-transport/features/sessions-identity/resuming-sessions.mdx
new file mode 100644
index 0000000000..558bd36d1c
--- /dev/null
+++ b/src/pages/docs/ai-transport/features/sessions-identity/resuming-sessions.mdx
@@ -0,0 +1,140 @@
+---
+title: "Resuming sessions"
+meta_description: "How clients and agents reconnect to ongoing AI Transport sessions after network interruptions or service restarts"
+meta_keywords: "session resumption, reconnection, hydration, presence sync, conversation history, channel history, untilAttach, durable execution, agent restart, message recovery, failover"
+---
+
+AI Transport uses a channel-oriented model where sessions persist independently of individual connections. Both users and agents can disconnect and rejoin without ending the session. When users or agents rejoin, they need to resume the session from where they left off.
+ +An agent or user might resume an existing session when: + +- A user goes offline or navigates away before returning, expecting to see the latest conversation state +- An agent goes offline and comes back online when the user returns +- An agent resumes after a failover or service restart + +## Hydrating presence + +When you attach to a channel, Ably automatically syncs the complete current presence set to your client. You can then query the presence set or subscribe to presence events without any additional hydration steps. This works the same way for both users and agents. + +For details on obtaining the synced presence set, see [Viewing who is online](/docs/ai-transport/sessions-and-identity/online-status#viewing-presence). + +## User resumes a session + +Users resume by reattaching to the same session channel and hydrating the conversation transcript, in-progress model output, or other session state. + +### Hydrating conversation history + +The hydration strategy you choose depends on your application model and your chosen approach to token streaming. Clients typically hydrate conversation state using one of these patterns: + +- Hydrate entirely from the channel: Use [rewind](/docs/channels/options/rewind) or [history](/docs/storage-history/history) to obtain previous messages on the channel. +- Hydrate in-progress responses from the channel: Load completed messages from your database and catch up on any in-progress responses from the channel. + +For detailed examples of hydrating the token stream, see the token streaming documentation: +- [Message-per-response hydration](/docs/ai-transport/features/token-streaming/message-per-response#hydration) +- [Message-per-token hydration](/docs/ai-transport/features/token-streaming/message-per-token#hydration) + +## Agent resumes a session + +When an agent restarts, it needs to resume from where it left off. This involves two distinct concerns: + +1. **Recovering the agent's execution state**: The current step in the workflow, local variables, function call results, pending operations, and any other state needed to continue execution. This state is internal to the agent and typically not visible to users. + +2. **Catching up on session activity**: Any user messages, events, or other activity that occurred while the agent was offline. + +These are separate problems requiring different solutions. Agent execution state is handled by your application and you choose how to persist and restore the internal state your agent needs to resume. + + + +Ably provides access to channel message history, enabling agents to retrieve any messages sent while they were offline. When your agent comes back online, it reattaches to the same channel and catches up on messages it missed. This channel-oriented model provides several key benefits: + +- Guaranteed message delivery: Clients can continue publishing messages even while the agent faults and relocates since the channel exists independently of the agent +- Reliable catch-up: The agent can retrieve any messages published during the interim when it comes back online +- Ordered delivery: Messages are delivered in the order they were published, ensuring agents process events in the correct sequence +- Channel-based addressing: The agent only needs the channel name to reconnect, no need to track individual client connections or manage connection state + +### Catching up on messages using history + +When an agent resumes, it needs to retrieve messages published while it was offline. 
Use [channel history](/docs/storage-history/history) with the [`untilAttach` option](/docs/storage-history/history#continuous-history) to catch up on historical messages while preserving continuity with live message delivery. + + + +#### Persisted session state + +Your agent should persist the following state to enable resumption: + +- Channel name: The channel the agent was processing +- Last processed timestamp: The timestamp of the last message successfully processed by the agent + +This state allows the agent to reconnect to the correct channel and retrieve only the messages it missed. + +#### Catching up with continuity + +The recommended pattern uses `untilAttach` to paginate backwards through history while maintaining continuity with live message delivery. This ensures no messages are lost between history retrieval and subscription. + + + + +```javascript +// Agent code +import * as Ably from 'ably'; + +const ably = new Ably.Realtime({ + key: process.env.ABLY_API_KEY, + clientId: 'agent:assistant' +}); + +// Load persisted session state +const channelName = await loadChannelName(); +const lastProcessedTimestamp = await loadLastProcessedTimestamp(); + +// Use a channel in a namespace with persistence enabled +// to access more than 2 minutes of message history +const channel = ably.channels.get(channelName); + +// Subscribe to live messages (implicitly attaches the channel) +await channel.subscribe('prompt', (message) => { + // Process the live message + processMessage(message); + + // Persist the timestamp after successful processing + saveLastProcessedTimestamp(message.timestamp); +}); + +// Fetch history up until the point of attachment, starting from last checkpoint +let page = await channel.history({ + untilAttach: true, + start: lastProcessedTimestamp, + direction: 'forwards' +}); + +// Paginate through all missed messages +while (page) { + for (const message of page.items) { + // Process the historical message + await processMessage(message); + + // Persist the timestamp after successful processing + await saveLastProcessedTimestamp(message.timestamp); + } + + // Move to next page if available + page = page.hasNext() ? await page.next() : null; +} +``` + + + + +This pattern provides guaranteed continuity between historical and live message processing by ensuring that: + +1. The subscription starts receiving live messages immediately when you subscribe +2. History retrieval stops exactly at the point the channel attached +3. 
No messages are lost between the end of history and the start of live delivery From f5ce09165f908f138c55bc84a7ab956b358fd0fc Mon Sep 17 00:00:00 2001 From: matt423 Date: Mon, 5 Jan 2026 11:38:46 +0000 Subject: [PATCH 23/59] chore: update message annotations terminology to include appends --- examples/pub-sub-message-annotations/javascript/README.md | 2 +- .../pub-sub-message-annotations/javascript/src/config.ts | 2 +- .../pub-sub-message-annotations/javascript/src/script.ts | 4 +++- .../features/token-streaming/message-per-response.mdx | 4 ++-- src/pages/docs/api/realtime-sdk/channels.mdx | 2 +- src/pages/docs/api/realtime-sdk/messages.mdx | 2 +- src/pages/docs/api/rest-api.mdx | 4 ++-- src/pages/docs/api/rest-sdk/channels.mdx | 2 +- src/pages/docs/channels/index.mdx | 2 +- src/pages/docs/chat/integrations.mdx | 2 +- src/pages/docs/messages/annotations.mdx | 6 +++--- src/pages/docs/messages/index.mdx | 2 +- src/pages/docs/messages/updates-deletes.mdx | 6 +++--- src/pages/docs/platform/errors/codes.mdx | 6 +++--- 14 files changed, 24 insertions(+), 22 deletions(-) diff --git a/examples/pub-sub-message-annotations/javascript/README.md b/examples/pub-sub-message-annotations/javascript/README.md index 7a2a50c046..18773b9ad1 100644 --- a/examples/pub-sub-message-annotations/javascript/README.md +++ b/examples/pub-sub-message-annotations/javascript/README.md @@ -67,7 +67,7 @@ This example demonstrates: yarn install ``` -6. Enable the "Annotations, updates and deletes" channel rule that matches the channel name you'll be using (by default we use a channel name of `annotation:pub-sub-message-annotations`, so if using this, [create this rule](https://ably.com/docs/channels#rules) for the "annotation" channel namespace). +6. Enable the "Annotations, updates, deletes, and appends" channel rule that matches the channel name you'll be using (by default we use a channel name of `annotation:pub-sub-message-annotations`, so if using this, [create this rule](https://ably.com/docs/channels#rules) for the "annotation" channel namespace). 7. Run the server: diff --git a/examples/pub-sub-message-annotations/javascript/src/config.ts b/examples/pub-sub-message-annotations/javascript/src/config.ts index 5a0ffcf0ae..32996b5600 100644 --- a/examples/pub-sub-message-annotations/javascript/src/config.ts +++ b/examples/pub-sub-message-annotations/javascript/src/config.ts @@ -1,6 +1,6 @@ export const urlParams = new URLSearchParams(window.location.search); export const clientId = urlParams.get('clientId') || 'user1'; -// Remember to enable the "Annotations, updates, and deletes" channel rule for the channel +// Remember to enable the "Annotations, updates, deletes, and appends" channel rule for the channel // namespace you're using (the first colon-delimited segment, here, "annotation") export const channelName = `annotation:${import.meta.env.VITE_NAME ?? 'annotation:pub-sub-message-annotations'}`; diff --git a/examples/pub-sub-message-annotations/javascript/src/script.ts b/examples/pub-sub-message-annotations/javascript/src/script.ts index 924db6e11c..67af06ca0d 100644 --- a/examples/pub-sub-message-annotations/javascript/src/script.ts +++ b/examples/pub-sub-message-annotations/javascript/src/script.ts @@ -27,7 +27,9 @@ async function main() { // Regular messages will be received as message.create events. 
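// Summary events carry the aggregated annotation state for a message and
// arrive with action 'message.summary', handled below.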
getChannel().subscribe((message) => { if (!hasSerial(message)) { - console.error('Received message without serial (this indicates that you need to enable the "Annotations, updates, and deletes" feature in channel rules)'); + console.error( + 'Received message without serial (this indicates that you need to enable the "Annotations, updates, deletes, and appends" feature in channel rules)', + ); return; } if (message.action === 'message.summary') { diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx index b91bf42ae1..dab2c838f5 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx @@ -22,7 +22,7 @@ Standard Ably message [size limits](/docs/platform/pricing/limits#message) apply ## Enable appends -Message append functionality requires the "Message annotations, updates, and deletes" [channel rule](/docs/channels#rules) enabled for your channel or [namespace](/docs/channels#namespaces). +Message append functionality requires the "Message annotations, updates, deletes, and appends" [channel rule](/docs/channels#rules) enabled for your channel or [namespace](/docs/channels#namespaces). ## Subscribing to tool calls @@ -154,7 +154,7 @@ await channel.subscribe((message) => { ## Generative UI @@ -184,7 +184,7 @@ await channel.subscribe((message) => { ## Client-side tools @@ -199,7 +199,7 @@ Client-side tool calls follow a request-response pattern over Ably channels: 4. The agent receives the result and continues processing. The client subscribes to tool call requests, executes the tool using device APIs, and publishes the result back to the channel. The `toolCallId` enables correlation between tool call requests and results: @@ -266,5 +266,5 @@ Tool calls resolved by humans are one approach to implementing human-in-the-loop For example, a tool that modifies data, performs financial transactions, or accesses sensitive resources might require explicit user approval before execution. The tool call information is surfaced to the user, who can then approve or reject the action. 
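As a minimal sketch of one way to run that approval step over the channel (the `tool_approval_request` and `tool_approval_response` message names, the payload shape, and the `requestApproval` helper are illustrative assumptions rather than a prescribed API):

```javascript
// Agent code: surface a sensitive tool call and wait for a human decision
async function requestApproval(channel, toolCall) {
  // Publish the pending tool call so connected users can review it
  await channel.publish("tool_approval_request", {
    requestId: toolCall.id,
    name: toolCall.name,
    arguments: toolCall.arguments,
  });

  // Resolve once a decision correlated by requestId arrives
  return new Promise((resolve) => {
    const listener = (message) => {
      if (message.data.requestId === toolCall.id) {
        channel.unsubscribe("tool_approval_response", listener);
        resolve(message.data.approved === true);
      }
    };
    channel.subscribe("tool_approval_response", listener);
  });
}
```

In practice you would also verify the responder's identity (for example `message.clientId` or `message.extras.userClaim`) before acting on the decision, and apply a timeout so an unanswered request does not block the agent indefinitely.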
diff --git a/src/pages/docs/ai-transport/features/sessions-identity/identifying-users-and-agents.mdx b/src/pages/docs/ai-transport/sessions-identity/identifying-users-and-agents.mdx similarity index 100% rename from src/pages/docs/ai-transport/features/sessions-identity/identifying-users-and-agents.mdx rename to src/pages/docs/ai-transport/sessions-identity/identifying-users-and-agents.mdx diff --git a/src/pages/docs/ai-transport/features/sessions-identity/index.mdx b/src/pages/docs/ai-transport/sessions-identity/index.mdx similarity index 100% rename from src/pages/docs/ai-transport/features/sessions-identity/index.mdx rename to src/pages/docs/ai-transport/sessions-identity/index.mdx diff --git a/src/pages/docs/ai-transport/features/sessions-identity/online-status.mdx b/src/pages/docs/ai-transport/sessions-identity/online-status.mdx similarity index 99% rename from src/pages/docs/ai-transport/features/sessions-identity/online-status.mdx rename to src/pages/docs/ai-transport/sessions-identity/online-status.mdx index 7a852d482f..3e9722d647 100644 --- a/src/pages/docs/ai-transport/features/sessions-identity/online-status.mdx +++ b/src/pages/docs/ai-transport/sessions-identity/online-status.mdx @@ -20,7 +20,7 @@ In channel-oriented sessions, online status serves several critical purposes: Use the [`enter()`](/docs/presence-occupancy/presence#enter) method to signal that a user or agent is online. When a client enters presence, they are added to the presence set and identified by their `clientId`. You can optionally include data when entering presence to communicate additional context. You have flexibility in when to enter presence. For example, an agent might choose to appear as online only while processing a specific task, or remain present for the duration of the entire session. Users typically enter presence when they connect to a session and remain present until they disconnect. diff --git a/src/pages/docs/ai-transport/features/sessions-identity/resuming-sessions.mdx b/src/pages/docs/ai-transport/sessions-identity/resuming-sessions.mdx similarity index 97% rename from src/pages/docs/ai-transport/features/sessions-identity/resuming-sessions.mdx rename to src/pages/docs/ai-transport/sessions-identity/resuming-sessions.mdx index 6883634eee..e5e4fbd159 100644 --- a/src/pages/docs/ai-transport/features/sessions-identity/resuming-sessions.mdx +++ b/src/pages/docs/ai-transport/sessions-identity/resuming-sessions.mdx @@ -30,8 +30,8 @@ The hydration strategy you choose depends on your application model and your cho - Hydrate in-progress responses from the channel: Load completed messages from your database and catch up on any in-progress responses from the channel. 
For detailed examples of hydrating the token stream, see the token streaming documentation: -- [Message-per-response hydration](/docs/ai-transport/features/token-streaming/message-per-response#hydration) -- [Message-per-token hydration](/docs/ai-transport/features/token-streaming/message-per-token#hydration) +- [Message-per-response hydration](/docs/ai-transport/token-streaming/message-per-response#hydration) +- [Message-per-token hydration](/docs/ai-transport/token-streaming/message-per-token#hydration) ## Agent resumes a session diff --git a/src/pages/docs/ai-transport/features/token-streaming/index.mdx b/src/pages/docs/ai-transport/token-streaming/index.mdx similarity index 90% rename from src/pages/docs/ai-transport/features/token-streaming/index.mdx rename to src/pages/docs/ai-transport/token-streaming/index.mdx index 16d3bce087..b727224e71 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/index.mdx +++ b/src/pages/docs/ai-transport/token-streaming/index.mdx @@ -52,7 +52,7 @@ The Realtime client maintains a persistent connection to the Ably service, enabl ### Message-per-response -[Message-per-response](/docs/ai-transport/features/token-streaming/message-per-response) streams tokens as they arrive while maintaining a clean, compacted message history. Each LLM response becomes a single message on an Ably channel that grows as tokens are appended. This results in efficient storage and straightforward retrieval of complete responses. +[Message-per-response](/docs/ai-transport/token-streaming/message-per-response) streams tokens as they arrive while maintaining a clean, compacted message history. Each LLM response becomes a single message on an Ably channel that grows as tokens are appended. This results in efficient storage and straightforward retrieval of complete responses. This pattern is the recommended approach for most applications. It excels when: @@ -67,7 +67,7 @@ Example use cases: ### Message-per-token -[Message-per-token](/docs/ai-transport/features/token-streaming/message-per-token) publishes every generated token as an independent Ably message. Each token appears as a separate message in channel history. +[Message-per-token](/docs/ai-transport/token-streaming/message-per-token) publishes every generated token as an independent Ably message. Each token appears as a separate message in channel history. 
This pattern is useful when: @@ -87,6 +87,6 @@ Different models and frameworks use different events to signal streaming state, ## Next steps -- Implement token streaming with [message-per-response](/docs/ai-transport/features/token-streaming/message-per-response) (recommended for most applications) -- Implement token streaming with [message-per-token](/docs/ai-transport/features/token-streaming/message-per-token) for sliding-window use cases +- Implement token streaming with [message-per-response](/docs/ai-transport/token-streaming/message-per-response) (recommended for most applications) +- Implement token streaming with [message-per-token](/docs/ai-transport/token-streaming/message-per-token) for sliding-window use cases - Explore the guides for integration with specific models and frameworks diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx similarity index 100% rename from src/pages/docs/ai-transport/features/token-streaming/message-per-response.mdx rename to src/pages/docs/ai-transport/token-streaming/message-per-response.mdx diff --git a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/token-streaming/message-per-token.mdx similarity index 99% rename from src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx rename to src/pages/docs/ai-transport/token-streaming/message-per-token.mdx index 66232ff5a2..e7d9efe93d 100644 --- a/src/pages/docs/ai-transport/features/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/token-streaming/message-per-token.mdx @@ -240,7 +240,7 @@ For brief disconnections, Ably's automatic [connection recovery](docs/connect/st ### Using rewind for recent history diff --git a/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx b/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx index 7cc7579371..b8ac5f7c9f 100644 --- a/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx +++ b/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx @@ -4,12 +4,12 @@ meta_description: "Stream tokens from the Anthropic Messages API over Ably in re meta_keywords: "AI, token streaming, Anthropic, Claude, Messages API, AI transport, Ably, realtime, message appends" --- -This guide shows you how to stream AI responses from Anthropic's [Messages API](https://docs.anthropic.com/en/api/messages) over Ably using the [message-per-response pattern](/docs/ai-transport/features/token-streaming/message-per-response). Specifically, it appends each response token to a single Ably message, creating a complete AI response that grows incrementally while delivering tokens in realtime. +This guide shows you how to stream AI responses from Anthropic's [Messages API](https://docs.anthropic.com/en/api/messages) over Ably using the [message-per-response pattern](/docs/ai-transport/token-streaming/message-per-response). Specifically, it appends each response token to a single Ably message, creating a complete AI response that grows incrementally while delivering tokens in realtime. Using Ably to distribute tokens from the Anthropic SDK enables you to broadcast AI responses to thousands of concurrent subscribers with reliable message delivery and ordering guarantees. 
This approach stores each complete response as a single message in channel history, making it easy to retrieve conversation history without processing thousands of individual token messages. ## Prerequisites @@ -246,7 +246,7 @@ This implementation: - Appends each token to the original message + + #### Subscribing Subscribe to the main conversation channel to receive control messages and model output. Subscribe to the reasoning channel on demand, for example in response to a click event. diff --git a/src/pages/docs/ai-transport/messaging/citations.mdx b/src/pages/docs/ai-transport/messaging/citations.mdx index a71ae51795..1944200959 100644 --- a/src/pages/docs/ai-transport/messaging/citations.mdx +++ b/src/pages/docs/ai-transport/messaging/citations.mdx @@ -140,7 +140,11 @@ When streaming response tokens using the [message-per-response](/docs/ai-transpo + + ## Subscribing to summaries diff --git a/src/pages/docs/ai-transport/messaging/human-in-the-loop.mdx b/src/pages/docs/ai-transport/messaging/human-in-the-loop.mdx index b896366da3..3b9c075259 100644 --- a/src/pages/docs/ai-transport/messaging/human-in-the-loop.mdx +++ b/src/pages/docs/ai-transport/messaging/human-in-the-loop.mdx @@ -52,6 +52,10 @@ async function requestHumanApproval(toolCall) { ``` + + ## Review and decide Authorized humans subscribe to approval requests on the conversation channel and publish their decisions. The `requestId` correlates the response with the original request. @@ -99,6 +103,10 @@ async function reject(requestId) { ``` + + ## Process the decision The agent listens for human decisions and acts accordingly. When a response arrives, the agent retrieves the pending request using the `requestId`, verifies that the user is permitted to approve that specific action, and either executes the action or handles the rejection. diff --git a/src/pages/docs/ai-transport/messaging/tool-calls.mdx b/src/pages/docs/ai-transport/messaging/tool-calls.mdx index dddbcfc7eb..d987d2bb26 100644 --- a/src/pages/docs/ai-transport/messaging/tool-calls.mdx +++ b/src/pages/docs/ai-transport/messaging/tool-calls.mdx @@ -94,6 +94,10 @@ Model APIs like OpenAI's [Responses API](https://platform.openai.com/docs/api-re To learn how to stream individual tokens as they are generated, see the [token streaming](/docs/ai-transport/token-streaming) documentation. + + ## Subscribing to tool calls Subscribe to tool call and model output messages on the channel. @@ -239,6 +243,10 @@ await channel.subscribe('tool_call', async (message) => { Client-side tools often require user permission to access device APIs. These permissions are managed by the device operating system, not the agent. Handle permission denials gracefully by publishing an error tool result so the AI can respond appropriately. + + The agent subscribes to tool results to continue processing. The `toolCallId` correlates the result back to the original request: diff --git a/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx index d37c2ca6d2..3442300f18 100644 --- a/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx @@ -96,6 +96,10 @@ for await (const event of stream) { Append only supports concatenating data of the same type as the original message. For example, if the initial message data is a string, all appended tokens must also be strings. 
If the initial message data is binary, all appended tokens must be binary. + + This pattern allows publishing append operations for multiple concurrent model responses on the same channel. As long as you append to the correct message serial, tokens from different responses will not interfere with each other, and the final concatenated message for each response will contain only the tokens from that response. ### Configuring rollup behaviour diff --git a/src/pages/docs/ai-transport/token-streaming/message-per-token.mdx b/src/pages/docs/ai-transport/token-streaming/message-per-token.mdx index 9bddcd8645..434aa6aa57 100644 --- a/src/pages/docs/ai-transport/token-streaming/message-per-token.mdx +++ b/src/pages/docs/ai-transport/token-streaming/message-per-token.mdx @@ -52,6 +52,10 @@ This approach maximizes throughput while maintaining ordering guarantees, allowi Unlike the [message-per-response](/docs/ai-transport/features/token-streaming/message-per-response) pattern, the message-per-token pattern requires you to [manage rate limits directly](/docs/ai-transport/features/token-streaming/token-rate-limits#per-token). + + ## Streaming patterns Ably is a pub/sub messaging platform, so you can structure your messages however works best for your application. Below are common patterns for streaming tokens, each showing both agent-side publishing and client-side subscription. Choose the approach that fits your use case, or create your own variation. diff --git a/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx b/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx index b8ac5f7c9f..1ce3260825 100644 --- a/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx +++ b/src/pages/docs/guides/ai-transport/anthropic-message-per-response.mdx @@ -175,7 +175,10 @@ Add the Ably client initialization to your `publisher.mjs` file: import Ably from 'ably'; // Initialize Ably Realtime client -const realtime = new Ably.Realtime({ key: '{{API_KEY}}' }); +const realtime = new Ably.Realtime({ + key: '{{API_KEY}}', + echoMessages: false +}); // Create a channel for publishing streamed AI responses const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); @@ -184,6 +187,10 @@ const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); The Ably Realtime client maintains a persistent connection to the Ably service, which allows you to publish tokens at high message rates with low latency. + + ### Publish initial message and append tokens When a new response begins, publish an initial message to create it. Ably assigns a [`serial`](/docs/messages#properties) identifier to the message. Use this `serial` to append each token to the message as it arrives from the Anthropic model. 
diff --git a/src/pages/docs/guides/ai-transport/anthropic-message-per-token.mdx b/src/pages/docs/guides/ai-transport/anthropic-message-per-token.mdx index 9ccc2ddf50..3b99dd66ca 100644 --- a/src/pages/docs/guides/ai-transport/anthropic-message-per-token.mdx +++ b/src/pages/docs/guides/ai-transport/anthropic-message-per-token.mdx @@ -152,7 +152,10 @@ Add the Ably client initialization to your `publisher.mjs` file: import Ably from 'ably'; // Initialize Ably Realtime client -const realtime = new Ably.Realtime({ key: '{{API_KEY}}' }); +const realtime = new Ably.Realtime({ + key: '{{API_KEY}}', + echoMessages: false +}); // Create a channel for publishing streamed AI responses const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); @@ -161,6 +164,10 @@ const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); The Ably Realtime client maintains a persistent connection to the Ably service, which allows you to publish tokens at high message rates with low latency. + + ### Map Anthropic streaming events to Ably messages Choose how to map [Anthropic streaming events](#understand-streaming-events) to Ably messages. You can choose any mapping strategy that suits your application's needs. This guide uses the following pattern as an example: diff --git a/src/pages/docs/guides/ai-transport/openai-message-per-response.mdx b/src/pages/docs/guides/ai-transport/openai-message-per-response.mdx index 3cecee670c..77afb9745d 100644 --- a/src/pages/docs/guides/ai-transport/openai-message-per-response.mdx +++ b/src/pages/docs/guides/ai-transport/openai-message-per-response.mdx @@ -189,7 +189,10 @@ Add the Ably client initialization to your `publisher.mjs` file: import Ably from 'ably'; // Initialize Ably Realtime client -const realtime = new Ably.Realtime({ key: '{{API_KEY}}' }); +const realtime = new Ably.Realtime({ + key: '{{API_KEY}}', + echoMessages: false +}); // Create a channel for publishing streamed AI responses const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); @@ -198,6 +201,10 @@ const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); The Ably Realtime client maintains a persistent connection to the Ably service, which allows you to publish tokens at high message rates with low latency. + + ### Publish initial message and append tokens When a new response begins, publish an initial message to create it. Ably assigns a [`serial`](/docs/messages#properties) identifier to the message. Use this `serial` to append each token to the message as it arrives from the OpenAI model. diff --git a/src/pages/docs/guides/ai-transport/openai-message-per-token.mdx b/src/pages/docs/guides/ai-transport/openai-message-per-token.mdx index 669f4dd246..7573bcbd3f 100644 --- a/src/pages/docs/guides/ai-transport/openai-message-per-token.mdx +++ b/src/pages/docs/guides/ai-transport/openai-message-per-token.mdx @@ -166,7 +166,10 @@ Add the Ably client initialization to your `publisher.mjs` file: import Ably from 'ably'; // Initialize Ably Realtime client -const realtime = new Ably.Realtime({ key: '{{API_KEY}}' }); +const realtime = new Ably.Realtime({ + key: '{{API_KEY}}', + echoMessages: false +}); // Create a channel for publishing streamed AI responses const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); @@ -175,6 +178,10 @@ const channel = realtime.channels.get('{{RANDOM_CHANNEL_NAME}}'); The Ably Realtime client maintains a persistent connection to the Ably service, which allows you to publish tokens at high message rates with low latency. 
+ + ### Map OpenAI streaming events to Ably messages Choose how to map [OpenAI streaming events](#understand-streaming-events) to Ably messages. You can choose any mapping strategy that suits your application's needs. This guide uses the following pattern as an example: From 208d73d21eba6a6546289141d902033826384b2e Mon Sep 17 00:00:00 2001 From: matt423 Date: Fri, 16 Jan 2026 13:06:50 +0000 Subject: [PATCH 51/59] chore: Use 2.17.0 in examples --- examples/package.json | 2 +- examples/yarn.lock | 28 +++++--------------- src/components/Examples/ExamplesRenderer.tsx | 2 +- 3 files changed, 8 insertions(+), 24 deletions(-) diff --git a/examples/package.json b/examples/package.json index a88e37e6bd..f4b9c52ac8 100644 --- a/examples/package.json +++ b/examples/package.json @@ -110,7 +110,7 @@ "@ably/chat": "~1.1.0", "@ably/chat-react-ui-kit": "~0.3.0", "@ably/spaces": "~0.4.0", - "ably": "~2.16.0", + "ably": "~2.17.0", "cors": "^2.8.5", "franken-ui": "^2.0.0", "lodash": "^4.17.21", diff --git a/examples/yarn.lock b/examples/yarn.lock index 49e39d4d4c..a40fba73a9 100644 --- a/examples/yarn.lock +++ b/examples/yarn.lock @@ -900,10 +900,10 @@ magic-string "^0.27.0" react-refresh "^0.14.0" -ably@~2.16.0: - version "2.16.0" - resolved "https://registry.yarnpkg.com/ably/-/ably-2.16.0.tgz#b4042182e9ea54e621c60eb76997b3f760901fb4" - integrity sha512-X7SdHJC2ybCKAcFyyvi/VAN903q7JnEqdtpOXMM6TNWdNj/b40a4ijzEX/9lXSKddUJCiYM2KaFaVnSRn90YMw== +ably@~2.17.0: + version "2.17.0" + resolved "https://registry.yarnpkg.com/ably/-/ably-2.17.0.tgz#3d30547aebd3a70573277112d7f464e354f1e252" + integrity sha512-BJPxdFU2uuT4UDRUBcmLXRPNmXWGPIKZ+B7hMj1ygja3UZA2zox388yul1h1ie07V/8+Kn8fzPik3ewiSl5tAA== dependencies: "@ably/msgpack-js" "^0.4.0" dequal "^2.0.3" @@ -2836,16 +2836,7 @@ statuses@2.0.1: resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== -"string-width-cjs@npm:string-width@^4.2.0": - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - -string-width@^4.1.0: +"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.1.0: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -2863,14 +2854,7 @@ string-width@^5.0.1, string-width@^5.1.2: emoji-regex "^9.2.2" strip-ansi "^7.0.1" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1": - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - -strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== diff --git 
a/src/components/Examples/ExamplesRenderer.tsx b/src/components/Examples/ExamplesRenderer.tsx index e88c8d608d..0d7795d86d 100644 --- a/src/components/Examples/ExamplesRenderer.tsx +++ b/src/components/Examples/ExamplesRenderer.tsx @@ -37,7 +37,7 @@ const UserIndicator = ({ user }: { user: string }) => { const getDependencies = (id: string, products: string[], activeLanguage: LanguageKey) => { return { - ably: '~2.16.0', + ably: '~2.17.0', nanoid: '^5.0.7', minifaker: '1.34.1', 'franken-ui': '^2.0.0', From a0dbcdcb6c3ec2fb11741a0f085bba70142b6c8e Mon Sep 17 00:00:00 2001 From: matt423 Date: Fri, 16 Jan 2026 12:52:44 +0000 Subject: [PATCH 52/59] feat: Add AI Transport message per response example for Javascript --- .../javascript/README.md | 61 ++++++++++++ .../javascript/index.html | 49 ++++++++++ .../javascript/package.json | 10 ++ .../javascript/src/agent.ts | 52 ++++++++++ .../javascript/src/config.ts | 3 + .../javascript/src/llm.ts | 49 ++++++++++ .../javascript/src/script.ts | 94 +++++++++++++++++++ .../javascript/src/styles.css | 3 + .../javascript/tailwind.config.ts | 9 ++ .../javascript/vite.config.ts | 7 ++ examples/package.json | 3 +- src/data/examples/index.ts | 10 ++ 12 files changed, 349 insertions(+), 1 deletion(-) create mode 100644 examples/ai-transport-message-per-response/javascript/README.md create mode 100644 examples/ai-transport-message-per-response/javascript/index.html create mode 100644 examples/ai-transport-message-per-response/javascript/package.json create mode 100644 examples/ai-transport-message-per-response/javascript/src/agent.ts create mode 100644 examples/ai-transport-message-per-response/javascript/src/config.ts create mode 100644 examples/ai-transport-message-per-response/javascript/src/llm.ts create mode 100644 examples/ai-transport-message-per-response/javascript/src/script.ts create mode 100644 examples/ai-transport-message-per-response/javascript/src/styles.css create mode 100644 examples/ai-transport-message-per-response/javascript/tailwind.config.ts create mode 100644 examples/ai-transport-message-per-response/javascript/vite.config.ts diff --git a/examples/ai-transport-message-per-response/javascript/README.md b/examples/ai-transport-message-per-response/javascript/README.md new file mode 100644 index 0000000000..c0e3f41b66 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/README.md @@ -0,0 +1,61 @@ +# AI Transport message per response streaming + +Enable realtime streaming of AI/LLM responses by appending tokens to a single message over Ably. + +AI Transport message-per-response streaming allows applications to provide immediate, responsive AI interactions by streaming tokens in realtime. Unlike the message-per-token pattern, all tokens for a response are appended to a single message, which appears as one entry in channel history. This makes it easy to retrieve and display conversation history while still delivering live tokens in realtime. + +The streaming approach significantly improves perceived performance and user engagement. Instead of waiting 5-10 seconds for a complete AI response, users see tokens appearing progressively, creating a more natural conversation flow similar to watching someone type in realtime. + +Token streaming is implemented using [Ably AI Transport](/docs/ai-transport). AI Transport provides purpose-built APIs for realtime AI applications, offering reliable message delivery, automatic ordering, and seamless reconnection handling to ensure no tokens are lost during network interruptions. 
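As a sketch of what that enables, a client can later hydrate the conversation from history, where each response is a single compacted message (error handling omitted):

```javascript
// Fetch recent responses; each item is one complete, concatenated response
const page = await channel.history({ limit: 10 });
for (const message of page.items) {
  console.log(message.serial, message.data);
}
```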
+ +## Resources + +Use the following methods to implement AI Transport message-per-response streaming: + +- [`client.channels.get()`](/docs/channels#create): creates a new or retrieves an existing channel for AI Transport token streaming. +- [`channel.publish()`](/docs/channels#publish): publishes the initial message and captures the serial for token appending. +- [`channel.appendMessage()`](/docs/messages#append): appends individual tokens to the message as they arrive from the LLM service. +- [`channel.subscribe()`](/docs/channels#subscribe): subscribes to messages, handling `message.create`, `message.append`, and `message.update` actions. +- [`channel.setOptions()`](/docs/channels/options) with [`rewind`](/docs/channels/options/rewind): enables seamless message recovery during reconnections, delivering historical messages as `message.update` events. + +Find out more about [AI Transport](/docs/ai-transport) and [message appending](/docs/ai-transport/features/token-streaming/message-per-response). + +## Getting started + +1. Clone the [Ably docs](https://github.com/ably/docs) repository where this example can be found: + + ```sh + git clone git@github.com:ably/docs.git + ``` + +2. Change directory: + + ```sh + cd examples/ + ``` + +3. Rename the environment file: + + ```sh + mv .env.example .env.local + ``` + +4. In `.env.local` update the value of `VITE_ABLY_KEY` to be your Ably API key. + +5. Install dependencies: + + ```sh + yarn install + ``` + +6. Run the server: + + ```sh + yarn run ai-transport-message-per-response-javascript + ``` + +7. Try it out by opening [http://localhost:5173/](http://localhost:5173/) with your browser and selecting a prompt to see realtime AI token streaming. + +## Open in CodeSandbox + +In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key. diff --git a/examples/ai-transport-message-per-response/javascript/index.html b/examples/ai-transport-message-per-response/javascript/index.html new file mode 100644 index 0000000000..b002b18e24 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/index.html @@ -0,0 +1,49 @@ + + + + + + + AI Transport Message Per Response - JavaScript + + + +
+  </head>
+  <body>
+    <div class="container">
+      <div class="status-bar">
+        <span id="processing-status">Ready</span>
+        <button id="connection-toggle">Disconnect</button>
+      </div>
+      <div id="response-text">Select a prompt below to get started</div>
+      <div class="prompts">
+        <button id="prompt-button">What is Ably AI Transport?</button>
+      </div>
+    </div>
+    <script type="module" src="src/script.ts"></script>
+  </body>
+</html>
+ + + + diff --git a/examples/ai-transport-message-per-response/javascript/package.json b/examples/ai-transport-message-per-response/javascript/package.json new file mode 100644 index 0000000000..710f1c4a12 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/package.json @@ -0,0 +1,10 @@ +{ + "name": "ai-transport-message-per-response-javascript", + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + } +} diff --git a/examples/ai-transport-message-per-response/javascript/src/agent.ts b/examples/ai-transport-message-per-response/javascript/src/agent.ts new file mode 100644 index 0000000000..2c297e000f --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/agent.ts @@ -0,0 +1,52 @@ +// Agent Service +// This consumes LLM streams and publishes tokens using the message-per-response pattern +// All tokens are appended to a single message, which appears as one entry in channel history + +import * as Ably from 'ably'; +import { MockLLM } from './llm'; + +export class Agent { + private client: Ably.Realtime; + private channel: Ably.RealtimeChannel; + private llm: MockLLM; + + constructor(ablyKey: string, channelName: string) { + this.client = new Ably.Realtime({ + key: ablyKey, + clientId: 'ai-agent', + }); + this.channel = this.client.channels.get(channelName); + this.llm = new MockLLM(); + } + + async processPrompt(prompt: string): Promise { + const stream = await this.llm.responses.create(prompt); + let msgSerial: string | null = null; + + for await (const event of stream) { + if (event.type === 'message_start') { + // Create initial empty message and capture its serial + const publishResult = await this.channel.publish({ + name: 'response', + data: '', + }); + msgSerial = publishResult.serials[0]; + } else if (event.type === 'message_delta') { + // Append each token to the same message using its serial + if (msgSerial && event.text) { + this.channel.appendMessage({ + serial: msgSerial, + data: event.text, + }); + } + } else if (event.type === 'message_stop') { + // Stream complete - all tokens have been appended + console.log('Response complete'); + } + } + } + + disconnect(): void { + this.client.close(); + } +} diff --git a/examples/ai-transport-message-per-response/javascript/src/config.ts b/examples/ai-transport-message-per-response/javascript/src/config.ts new file mode 100644 index 0000000000..8617022a2d --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/config.ts @@ -0,0 +1,3 @@ +export const config = { + ABLY_KEY: import.meta.env.VITE_ABLY_KEY || 'YOUR_ABLY_KEY_HERE', +}; diff --git a/examples/ai-transport-message-per-response/javascript/src/llm.ts b/examples/ai-transport-message-per-response/javascript/src/llm.ts new file mode 100644 index 0000000000..ab9f1061f8 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/llm.ts @@ -0,0 +1,49 @@ +// Mock LLM Service +// This simulates a generic LLM SDK with streaming capabilities + +interface StreamEvent { + type: 'message_start' | 'message_delta' | 'message_stop'; + text?: string; + responseId: string; +} + +export class MockLLM { + private readonly responseText = + 'Ably AI Transport is a solution for building stateful, steerable, multi-device AI experiences into new or existing applications. 
You can use AI Transport as the transport layer with any LLM or agent framework, without rebuilding your existing stack or being locked to a particular vendor.'; + + responses = { + create: (prompt: string) => this.createStream(prompt), + }; + + private async *createStream(_prompt: string): AsyncIterable { + const responseId = `resp_${crypto.randomUUID()}`; + + // Yield start event + yield { type: 'message_start', responseId }; + + // Chunk text into tokens (simulates LLM tokenization) + const tokens = this.chunkTextLikeAI(this.responseText); + + for (const token of tokens) { + // Simulate realistic delay between tokens + await new Promise((resolve) => setTimeout(resolve, Math.random() * 150 + 50)); + + // Yield token event + yield { type: 'message_delta', text: token, responseId }; + } + + // Yield stop event + yield { type: 'message_stop', responseId }; + } + + private chunkTextLikeAI(text: string): string[] { + const chunks: string[] = []; + let pos = 0; + while (pos < text.length) { + const size = Math.floor(Math.random() * 8) + 1; + chunks.push(text.slice(pos, pos + size)); + pos += size; + } + return chunks.filter((chunk) => chunk.length > 0); + } +} diff --git a/examples/ai-transport-message-per-response/javascript/src/script.ts b/examples/ai-transport-message-per-response/javascript/src/script.ts new file mode 100644 index 0000000000..ee7dc3b896 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/script.ts @@ -0,0 +1,94 @@ +import * as Ably from 'ably'; +import { Agent } from './agent'; +import { config } from './config'; + +// Generate unique channel name for this session +const CHANNEL_NAME = `ai:response-${crypto.randomUUID()}`; +const client = new Ably.Realtime({ + key: config.ABLY_KEY, +}); + +const channel = client.channels.get(CHANNEL_NAME); + +// DOM elements +const responseTextElement = document.getElementById('response-text') as HTMLDivElement; +const connectionToggle = document.getElementById('connection-toggle') as HTMLButtonElement; +const promptButton = document.getElementById('prompt-button') as HTMLButtonElement; +const processingStatus = document.getElementById('processing-status') as HTMLSpanElement; + +// Track responses by message serial +const responses = new Map(); +let currentSerial: string | null = null; + +const updateDisplay = () => { + if (currentSerial) { + responseTextElement.innerText = responses.get(currentSerial) || ''; + } +}; + +// Subscribe to messages - rewind delivers history as message.update, +// then seamlessly transitions to live message.append events +channel.subscribe((message: Ably.Message) => { + const serial = message.serial; + if (!serial) { + return; + } + + switch (message.action) { + case 'message.create': + responses.set(serial, message.data || ''); + currentSerial = serial; + processingStatus.innerText = 'Streaming'; + break; + case 'message.append': { + // Only append if this is for the current response + if (currentSerial === serial) { + const current = responses.get(serial) || ''; + responses.set(serial, current + (message.data || '')); + } + break; + } + case 'message.update': + // Full state from history or resync - always use it + responses.set(serial, message.data || ''); + currentSerial = serial; + break; + } + updateDisplay(); +}); + +const handlePromptClick = () => { + currentSerial = null; + responseTextElement.innerText = ''; + processingStatus.innerText = 'Streaming'; + + const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); + agent.processPrompt('What is Ably AI Transport?'); +}; + +const 
handleConnect = async () => { + // Set rewind option before attaching to get history as message.update events + channel.setOptions({ params: { rewind: '2m' } }); + await channel.attach(); + connectionToggle.innerText = 'Disconnect'; + processingStatus.innerText = 'Ready'; +}; + +const handleDisconnect = async () => { + await channel.detach(); + processingStatus.innerText = 'Paused'; + connectionToggle.innerText = 'Connect'; +}; + +const handleConnectionToggle = () => { + if (channel.state === 'attached') { + handleDisconnect(); + } else { + handleConnect(); + } +}; + +connectionToggle.onclick = handleConnectionToggle; +promptButton.onclick = handlePromptClick; + +handleConnect(); diff --git a/examples/ai-transport-message-per-response/javascript/src/styles.css b/examples/ai-transport-message-per-response/javascript/src/styles.css new file mode 100644 index 0000000000..b5c61c9567 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/styles.css @@ -0,0 +1,3 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; diff --git a/examples/ai-transport-message-per-response/javascript/tailwind.config.ts b/examples/ai-transport-message-per-response/javascript/tailwind.config.ts new file mode 100644 index 0000000000..1c86e1c371 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/tailwind.config.ts @@ -0,0 +1,9 @@ +import baseConfig from '../../tailwind.config'; +import type { Config } from 'tailwindcss'; + +const config: Config = { + ...baseConfig, + content: ['./src/**/*.{js,ts,tsx}', './index.html'], +}; + +export default config; diff --git a/examples/ai-transport-message-per-response/javascript/vite.config.ts b/examples/ai-transport-message-per-response/javascript/vite.config.ts new file mode 100644 index 0000000000..3b1cf13b4f --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/vite.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 'vite'; +import baseConfig from '../../vite.config'; + +export default defineConfig({ + ...baseConfig, + envDir: '../../', +}); diff --git a/examples/package.json b/examples/package.json index f4b9c52ac8..52adfbc551 100644 --- a/examples/package.json +++ b/examples/package.json @@ -6,7 +6,7 @@ "node": ">=20.0.0" }, "workspaces": [ - "ai-transport-message-per-token/react", + "ai-transport-message-per-response/javascript", "ai-transport-message-per-token/javascript", "ai-transport-message-per-token/react", "auth-generate-jwt/react", @@ -57,6 +57,7 @@ "spaces-member-location/javascript" ], "scripts": { + "ai-transport-message-per-response-javascript": "yarn workspace ai-transport-message-per-response-javascript dev", "ai-transport-message-per-token-javascript": "yarn workspace ai-transport-message-per-token-javascript dev", "ai-transport-message-per-token-react": "yarn workspace ai-transport-message-per-token-react dev", "auth-generate-jwt-javascript": "yarn workspace auth-generate-jwt-javascript dev", diff --git a/src/data/examples/index.ts b/src/data/examples/index.ts index 024ccd64d5..b7d4ba13c7 100644 --- a/src/data/examples/index.ts +++ b/src/data/examples/index.ts @@ -13,6 +13,16 @@ export const examples: Example[] = [ metaTitle: 'Build AI message-per-token streaming with Ably AI Transport', metaDescription: `Stream AI-generated tokens in realtime using the message-per-token pattern with Ably's AI Transport. 
Implement scalable token streaming with low latency.`, }, + { + id: 'ai-transport-message-per-response', + name: 'Message per response streaming', + description: 'Stream AI responses by appending tokens to a single message using the message-per-response pattern.', + products: ['ai_transport'], + layout: 'single-horizontal', + visibleFiles: ['src/script.ts', 'src/llm.ts', 'src/agent.ts', 'App.tsx', 'llm.ts', 'agent.ts', 'index.tsx'], + metaTitle: 'Build AI message-per-response streaming with Ably AI Transport', + metaDescription: `Stream AI-generated tokens by appending them to a single message using Ably AI Transport. Each response appears as one compacted message in channel history.`, + }, { id: 'chat-presence', name: 'Chat presence', From 0449aeec5ed2e27c1f23cebb0ad90d271bbfbc5a Mon Sep 17 00:00:00 2001 From: matt423 Date: Fri, 16 Jan 2026 12:54:04 +0000 Subject: [PATCH 53/59] feat: Add AI Transport message per response example for React --- .../react/README.md | 70 +++++++++ .../react/index.html | 12 ++ .../react/package.json | 10 ++ .../react/postcss.config.js | 6 + .../react/src/App.tsx | 146 ++++++++++++++++++ .../react/src/agent.ts | 52 +++++++ .../react/src/config.ts | 3 + .../react/src/index.tsx | 9 ++ .../react/src/llm.ts | 49 ++++++ .../react/src/styles/styles.css | 3 + .../react/tailwind.config.ts | 9 ++ .../react/tsconfig.json | 20 +++ .../react/tsconfig.node.json | 10 ++ .../react/vite.config.ts | 7 + examples/package.json | 2 + 15 files changed, 408 insertions(+) create mode 100644 examples/ai-transport-message-per-response/react/README.md create mode 100644 examples/ai-transport-message-per-response/react/index.html create mode 100644 examples/ai-transport-message-per-response/react/package.json create mode 100644 examples/ai-transport-message-per-response/react/postcss.config.js create mode 100644 examples/ai-transport-message-per-response/react/src/App.tsx create mode 100644 examples/ai-transport-message-per-response/react/src/agent.ts create mode 100644 examples/ai-transport-message-per-response/react/src/config.ts create mode 100644 examples/ai-transport-message-per-response/react/src/index.tsx create mode 100644 examples/ai-transport-message-per-response/react/src/llm.ts create mode 100644 examples/ai-transport-message-per-response/react/src/styles/styles.css create mode 100644 examples/ai-transport-message-per-response/react/tailwind.config.ts create mode 100644 examples/ai-transport-message-per-response/react/tsconfig.json create mode 100644 examples/ai-transport-message-per-response/react/tsconfig.node.json create mode 100644 examples/ai-transport-message-per-response/react/vite.config.ts diff --git a/examples/ai-transport-message-per-response/react/README.md b/examples/ai-transport-message-per-response/react/README.md new file mode 100644 index 0000000000..bb10ac8f99 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/README.md @@ -0,0 +1,70 @@ +# AI Transport message per response streaming + +Enable realtime streaming of AI/LLM responses using the message-per-response pattern, where all tokens are appended to a single Ably message. + +AI Transport message-per-response streaming allows applications to provide immediate, responsive AI interactions by streaming tokens in realtime while maintaining a clean message history. Each complete AI response appears as a single message in channel history, making it easy to retrieve and display multi-response conversation history. 
+ +The streaming approach significantly improves perceived performance and user engagement. Instead of waiting 5-10 seconds for a complete AI response, users see tokens appearing progressively, creating a more natural conversation flow similar to watching someone type in realtime. + +Token streaming is implemented using [Ably AI Transport](/docs/ai-transport). AI Transport provides purpose-built APIs for realtime AI applications, offering reliable message delivery, automatic ordering, and seamless reconnection handling to ensure no tokens are lost during network interruptions. + +## Resources + +Use the following components to implement AI Transport message-per-response streaming: + +- [`AblyProvider`](/docs/getting-started/react-hooks#ably-provider): initializes and manages a shared Ably client instance, passing it down through React context to enable realtime AI Transport functionality across the application. +- [`ChannelProvider`](/docs/getting-started/react-hooks#channel-provider): manages the state and functionality of a specific channel, providing access to AI response tokens and streaming state via React context. +- [`useChannel()`](/docs/getting-started/react-hooks#useChannel) hook: a hook to subscribe to messages with `message.create`, `message.append`, and `message.update` actions. +- [`rewind`](/docs/channels/options/rewind) channel option: enables seamless message recovery during reconnections, delivering historical messages as `message.update` events. +- [`appendMessage()`](/docs/api/realtime-sdk/channels#append-message): appends tokens to an existing message using its serial. + +Find out more about [AI Transport](/docs/ai-transport) and [message-per-response](/docs/ai-transport/features/token-streaming/message-per-response). + +## Getting started + +1. Clone the [Ably docs](https://github.com/ably/docs) repository where this example can be found: + + ```sh + git clone git@github.com:ably/docs.git + ``` + +2. Change directory: + + ```sh + cd examples/ + ``` + +3. Rename the environment file: + + ```sh + mv .env.example .env.local + ``` + +4. In `.env.local` update the value of `VITE_ABLY_KEY` to be your Ably API key. + +5. Install dependencies: + + ```sh + yarn install + ``` + +6. Run the server: + + ```sh + yarn run ai-transport-message-per-response-react + ``` + +7. Try it out by opening [http://localhost:5173/](http://localhost:5173/) with your browser and selecting a prompt to see realtime AI token streaming. + +## Open in CodeSandbox + +In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key. + +## How it works + +The message-per-response pattern works by: + +1. **Initial message**: When an agent response begins, publish an initial message with `message.create` action to the Ably channel with an empty or the first token as content. +2. **Token streaming**: Append subsequent tokens to the original message by publishing those tokens with the `message.append` action. +3. **Live delivery**: Clients subscribed to the channel receive each appended token in realtime, allowing them to progressively render the response. +4. **Compacted history**: The channel history contains only one message per agent response, which includes all tokens appended to it concatenated together. 
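For reference, a compact subscriber sketch of the flow above, mirroring the actions handled in this example's `App.tsx` (variable names illustrative):

```javascript
const responses = new Map();

await channel.subscribe((message) => {
  switch (message.action) {
    case 'message.create':
      // A new response begins with its initial (often empty) data
      responses.set(message.serial, message.data || '');
      break;
    case 'message.append':
      // A live token: concatenate it onto the response it belongs to
      responses.set(message.serial, (responses.get(message.serial) || '') + (message.data || ''));
      break;
    case 'message.update':
      // Full state delivered from history (rewind): replace any partial data
      responses.set(message.serial, message.data || '');
      break;
  }
});
```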
diff --git a/examples/ai-transport-message-per-response/react/index.html b/examples/ai-transport-message-per-response/react/index.html
new file mode 100644
index 0000000000..2c7da1dce4
--- /dev/null
+++ b/examples/ai-transport-message-per-response/react/index.html
@@ -0,0 +1,12 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>AI Transport Message Per Response</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/index.tsx"></script>
+  </body>
+</html>
diff --git a/examples/ai-transport-message-per-response/react/package.json b/examples/ai-transport-message-per-response/react/package.json
new file mode 100644
index 0000000000..efedff88ad
--- /dev/null
+++ b/examples/ai-transport-message-per-response/react/package.json
@@ -0,0 +1,10 @@
+{
+  "name": "ai-transport-message-per-response-react",
+  "version": "1.0.0",
+  "type": "module",
+  "scripts": {
+    "dev": "vite",
+    "build": "vite build",
+    "preview": "vite preview"
+  }
+}
diff --git a/examples/ai-transport-message-per-response/react/postcss.config.js b/examples/ai-transport-message-per-response/react/postcss.config.js
new file mode 100644
index 0000000000..2aa7205d4b
--- /dev/null
+++ b/examples/ai-transport-message-per-response/react/postcss.config.js
@@ -0,0 +1,6 @@
+export default {
+  plugins: {
+    tailwindcss: {},
+    autoprefixer: {},
+  },
+};
diff --git a/examples/ai-transport-message-per-response/react/src/App.tsx b/examples/ai-transport-message-per-response/react/src/App.tsx
new file mode 100644
index 0000000000..e63286be2d
--- /dev/null
+++ b/examples/ai-transport-message-per-response/react/src/App.tsx
@@ -0,0 +1,146 @@
+import React, { useState } from 'react';
+import { AblyProvider, ChannelProvider, useChannel, useConnectionStateListener } from 'ably/react';
+import { Realtime, Message } from 'ably';
+import { Agent } from './agent';
+import { config } from './config';
+import './styles/styles.css';
+
+// Generate unique channel name for this session
+const CHANNEL_NAME = `ai:response-${crypto.randomUUID()}`;
+
+const client = new Realtime({
+  key: config.ABLY_KEY,
+});
+
+const AITransportDemo: React.FC = () => {
+  const [responses, setResponses] = useState<Map<string, string>>(new Map());
+  const [currentSerial, setCurrentSerial] = useState<string | null>(null);
+  const [connectionState, setConnectionState] = useState('disconnected');
+  const [isChannelDetached, setIsChannelDetached] = useState(false);
+
+  // Subscribe to messages on the channel
+  const { channel } = useChannel(CHANNEL_NAME, (message: Message) => {
+    const serial = message.serial;
+    if (!serial) {
+      return;
+    }
+
+    switch (message.action) {
+      case 'message.create':
+        // Initial message creation
+        setResponses((prev) => new Map(prev).set(serial, message.data || ''));
+        setCurrentSerial(serial);
+        break;
+      case 'message.append':
+        // Only append if this is for the current response
+        setCurrentSerial((current) => {
+          if (current === serial) {
+            setResponses((prev) => {
+              const newMap = new Map(prev);
+              const existing = newMap.get(serial) || '';
+              return newMap.set(serial, existing + (message.data || ''));
+            });
+          }
+          return current;
+        });
+        break;
+      case 'message.update':
+        // Full state from history (rewind) - replace existing data
+        setResponses((prev) => new Map(prev).set(serial, message.data || ''));
+        setCurrentSerial(serial);
+        break;
+    }
+  });
+
+  useConnectionStateListener((stateChange: { current: string }) => {
+    setConnectionState(stateChange.current);
+  });
+
+  const currentResponse = currentSerial ?
responses.get(currentSerial) || '' : ''; + + const handlePromptClick = () => { + if (connectionState !== 'connected' || isChannelDetached) { + return; + } + + setResponses(new Map()); + setCurrentSerial(null); + + const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); + agent.processPrompt('What is Ably AI Transport?'); + }; + + const handleDisconnect = () => { + channel.detach(); + setIsChannelDetached(true); + }; + + const handleReconnect = async () => { + setIsChannelDetached(false); + // Set rewind option before reattaching to get history as message.update events + channel.setOptions({ params: { rewind: '2m' } }); + await channel.attach(); + }; + + return ( +
+    <div className="container">
+      {/* Response section with always visible status */}
+      <div className="response-section">
+        <div className="status-row">
+          <span className="status">
+            {isChannelDetached ? 'Disconnected' : connectionState === 'connected' ? 'Connected' : 'Disconnected'}
+          </span>
+          {/* Disconnect/Reconnect button */}
+          <button onClick={isChannelDetached ? handleReconnect : handleDisconnect}>
+            {isChannelDetached ? 'Reconnect' : 'Disconnect'}
+          </button>
+        </div>
+        <div className="response-text">
+          {currentResponse || 'Select a prompt below to get started'}
+        </div>
+      </div>
+      {/* Prompt selection */}
+      <div className="prompts">
+        <button onClick={handlePromptClick}>What is Ably AI Transport?</button>
+      </div>
+    </div>
+ ); +}; + +// Main App component with providers +const App: React.FC = () => { + return ( + + + + + + ); +}; + +export default App; diff --git a/examples/ai-transport-message-per-response/react/src/agent.ts b/examples/ai-transport-message-per-response/react/src/agent.ts new file mode 100644 index 0000000000..2c297e000f --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/agent.ts @@ -0,0 +1,52 @@ +// Agent Service +// This consumes LLM streams and publishes tokens using the message-per-response pattern +// All tokens are appended to a single message, which appears as one entry in channel history + +import * as Ably from 'ably'; +import { MockLLM } from './llm'; + +export class Agent { + private client: Ably.Realtime; + private channel: Ably.RealtimeChannel; + private llm: MockLLM; + + constructor(ablyKey: string, channelName: string) { + this.client = new Ably.Realtime({ + key: ablyKey, + clientId: 'ai-agent', + }); + this.channel = this.client.channels.get(channelName); + this.llm = new MockLLM(); + } + + async processPrompt(prompt: string): Promise { + const stream = await this.llm.responses.create(prompt); + let msgSerial: string | null = null; + + for await (const event of stream) { + if (event.type === 'message_start') { + // Create initial empty message and capture its serial + const publishResult = await this.channel.publish({ + name: 'response', + data: '', + }); + msgSerial = publishResult.serials[0]; + } else if (event.type === 'message_delta') { + // Append each token to the same message using its serial + if (msgSerial && event.text) { + this.channel.appendMessage({ + serial: msgSerial, + data: event.text, + }); + } + } else if (event.type === 'message_stop') { + // Stream complete - all tokens have been appended + console.log('Response complete'); + } + } + } + + disconnect(): void { + this.client.close(); + } +} diff --git a/examples/ai-transport-message-per-response/react/src/config.ts b/examples/ai-transport-message-per-response/react/src/config.ts new file mode 100644 index 0000000000..28bdb0c670 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/config.ts @@ -0,0 +1,3 @@ +export const config = { + ABLY_KEY: import.meta.env.VITE_ABLY_KEY || 'demo-key-for-examples:YOUR_ABLY_KEY_HERE', +}; diff --git a/examples/ai-transport-message-per-response/react/src/index.tsx b/examples/ai-transport-message-per-response/react/src/index.tsx new file mode 100644 index 0000000000..53a330f5c2 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/index.tsx @@ -0,0 +1,9 @@ +import { StrictMode } from 'react'; +import { createRoot } from 'react-dom/client'; +import App from './App'; + +createRoot(document.getElementById('root')).render( + + + , +); diff --git a/examples/ai-transport-message-per-response/react/src/llm.ts b/examples/ai-transport-message-per-response/react/src/llm.ts new file mode 100644 index 0000000000..ab9f1061f8 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/llm.ts @@ -0,0 +1,49 @@ +// Mock LLM Service +// This simulates a generic LLM SDK with streaming capabilities + +interface StreamEvent { + type: 'message_start' | 'message_delta' | 'message_stop'; + text?: string; + responseId: string; +} + +export class MockLLM { + private readonly responseText = + 'Ably AI Transport is a solution for building stateful, steerable, multi-device AI experiences into new or existing applications. 
You can use AI Transport as the transport layer with any LLM or agent framework, without rebuilding your existing stack or being locked to a particular vendor.'; + + responses = { + create: (prompt: string) => this.createStream(prompt), + }; + + private async *createStream(_prompt: string): AsyncIterable { + const responseId = `resp_${crypto.randomUUID()}`; + + // Yield start event + yield { type: 'message_start', responseId }; + + // Chunk text into tokens (simulates LLM tokenization) + const tokens = this.chunkTextLikeAI(this.responseText); + + for (const token of tokens) { + // Simulate realistic delay between tokens + await new Promise((resolve) => setTimeout(resolve, Math.random() * 150 + 50)); + + // Yield token event + yield { type: 'message_delta', text: token, responseId }; + } + + // Yield stop event + yield { type: 'message_stop', responseId }; + } + + private chunkTextLikeAI(text: string): string[] { + const chunks: string[] = []; + let pos = 0; + while (pos < text.length) { + const size = Math.floor(Math.random() * 8) + 1; + chunks.push(text.slice(pos, pos + size)); + pos += size; + } + return chunks.filter((chunk) => chunk.length > 0); + } +} diff --git a/examples/ai-transport-message-per-response/react/src/styles/styles.css b/examples/ai-transport-message-per-response/react/src/styles/styles.css new file mode 100644 index 0000000000..b5c61c9567 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/styles/styles.css @@ -0,0 +1,3 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; diff --git a/examples/ai-transport-message-per-response/react/tailwind.config.ts b/examples/ai-transport-message-per-response/react/tailwind.config.ts new file mode 100644 index 0000000000..1c86e1c371 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/tailwind.config.ts @@ -0,0 +1,9 @@ +import baseConfig from '../../tailwind.config'; +import type { Config } from 'tailwindcss'; + +const config: Config = { + ...baseConfig, + content: ['./src/**/*.{js,ts,tsx}', './index.html'], +}; + +export default config; diff --git a/examples/ai-transport-message-per-response/react/tsconfig.json b/examples/ai-transport-message-per-response/react/tsconfig.json new file mode 100644 index 0000000000..e92702dbee --- /dev/null +++ b/examples/ai-transport-message-per-response/react/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ESNext", + "lib": ["DOM", "DOM.Iterable", "ESNext"], + "allowJs": false, + "skipLibCheck": true, + "esModuleInterop": false, + "allowSyntheticDefaultImports": true, + "strict": true, + "forceConsistentCasingInFileNames": true, + "module": "ESNext", + "moduleResolution": "Node", + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx" + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +} diff --git a/examples/ai-transport-message-per-response/react/tsconfig.node.json b/examples/ai-transport-message-per-response/react/tsconfig.node.json new file mode 100644 index 0000000000..42872c59f5 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/examples/ai-transport-message-per-response/react/vite.config.ts b/examples/ai-transport-message-per-response/react/vite.config.ts new file mode 100644 index 
0000000000..3b1cf13b4f
--- /dev/null
+++ b/examples/ai-transport-message-per-response/react/vite.config.ts
@@ -0,0 +1,7 @@
+import { defineConfig } from 'vite';
+import baseConfig from '../../vite.config';
+
+export default defineConfig({
+  ...baseConfig,
+  envDir: '../../',
+});
diff --git a/examples/package.json b/examples/package.json
index 52adfbc551..857d8ecf32 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -7,6 +7,7 @@
   },
   "workspaces": [
     "ai-transport-message-per-response/javascript",
+    "ai-transport-message-per-response/react",
     "ai-transport-message-per-token/javascript",
     "ai-transport-message-per-token/react",
    "auth-generate-jwt/react",
@@ -58,6 +59,7 @@
   ],
   "scripts": {
     "ai-transport-message-per-response-javascript": "yarn workspace ai-transport-message-per-response-javascript dev",
+    "ai-transport-message-per-response-react": "yarn workspace ai-transport-message-per-response-react dev",
     "ai-transport-message-per-token-javascript": "yarn workspace ai-transport-message-per-token-javascript dev",
     "ai-transport-message-per-token-react": "yarn workspace ai-transport-message-per-token-react dev",
     "auth-generate-jwt-javascript": "yarn workspace auth-generate-jwt-javascript dev",

From 01ab0f840e803ae284944ecdc4b145345c695853 Mon Sep 17 00:00:00 2001
From: matt423
Date: Fri, 16 Jan 2026 13:50:54 +0000
Subject: [PATCH 54/59] chore: Use single agent reference in AIT examples

Prevent multiple agent (and Realtime) instances from being created.
---
 .../javascript/src/script.ts | 4 +++-
 .../react/src/App.tsx        | 9 +++++++--
 .../javascript/src/script.ts | 5 ++++-
 .../react/src/App.tsx        | 9 +++++++--
 4 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/examples/ai-transport-message-per-response/javascript/src/script.ts b/examples/ai-transport-message-per-response/javascript/src/script.ts
index ee7dc3b896..b8748377ab 100644
--- a/examples/ai-transport-message-per-response/javascript/src/script.ts
+++ b/examples/ai-transport-message-per-response/javascript/src/script.ts
@@ -10,6 +10,9 @@ const client = new Ably.Realtime({
   key: config.ABLY_KEY,
 });
 const channel = client.channels.get(CHANNEL_NAME);

+// Agent for processing prompts
+const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME);
+
 // DOM elements
 const responseTextElement = document.getElementById('response-text') as HTMLDivElement;
 const connectionToggle = document.getElementById('connection-toggle') as HTMLButtonElement;
 const promptButton = document.getElementById('prompt-button') as HTMLButtonElement;
@@ -62,7 +65,6 @@ const handlePromptClick = () => {
   responseTextElement.innerText = '';
   processingStatus.innerText = 'Streaming';

-  const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME);
   agent.processPrompt('What is Ably AI Transport?');
 };

diff --git a/examples/ai-transport-message-per-response/react/src/App.tsx b/examples/ai-transport-message-per-response/react/src/App.tsx
index e63286be2d..07bb4b730b 100644
--- a/examples/ai-transport-message-per-response/react/src/App.tsx
+++ b/examples/ai-transport-message-per-response/react/src/App.tsx
@@ -18,6 +18,12 @@ const AITransportDemo: React.FC = () => {
   const [connectionState, setConnectionState] = useState('disconnected');
   const [isChannelDetached, setIsChannelDetached] = useState(false);

+  // Agent persists across renders to avoid creating new connections
+  const agentRef = React.useRef<Agent | null>(null);
+  if (!agentRef.current) {
+    agentRef.current = new Agent(config.ABLY_KEY, CHANNEL_NAME);
+  }
+
   // Subscribe to messages on the channel
   const { channel } = useChannel(CHANNEL_NAME, (message: Message) => {
     const serial = message.serial;
@@ -66,8
+72,7 @@ const AITransportDemo: React.FC = () => { setResponses(new Map()); setCurrentSerial(null); - const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); - agent.processPrompt('What is Ably AI Transport?'); + agentRef.current?.processPrompt('What is Ably AI Transport?'); }; const handleDisconnect = () => { diff --git a/examples/ai-transport-message-per-token/javascript/src/script.ts b/examples/ai-transport-message-per-token/javascript/src/script.ts index 8687238245..398368acff 100644 --- a/examples/ai-transport-message-per-token/javascript/src/script.ts +++ b/examples/ai-transport-message-per-token/javascript/src/script.ts @@ -8,6 +8,10 @@ const client = new Ably.Realtime({ key: config.ABLY_KEY, }); const channel = client.channels.get(CHANNEL_NAME); + +// Agent for processing prompts +const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); + const responseTextElement = document.getElementById('response-text') as HTMLDivElement; const connectionToggle = document.getElementById('connection-toggle') as HTMLButtonElement; const promptButton = document.getElementById('prompt-button') as HTMLButtonElement; @@ -90,7 +94,6 @@ const handlePromptClick = () => { currentResponseId = `request-${crypto.randomUUID()}`; responseText = ''; updateDisplay(); - const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); agent.processPrompt('What is Ably AI Transport?', currentResponseId); }; diff --git a/examples/ai-transport-message-per-token/react/src/App.tsx b/examples/ai-transport-message-per-token/react/src/App.tsx index 01dfd4eae2..c7ecc026ee 100644 --- a/examples/ai-transport-message-per-token/react/src/App.tsx +++ b/examples/ai-transport-message-per-token/react/src/App.tsx @@ -21,6 +21,12 @@ const AITransportDemo: React.FC = () => { const isHydrating = useRef(false); const pendingTokens = useRef([]); + // Agent persists across renders to avoid creating new connections + const agentRef = React.useRef(null); + if (!agentRef.current) { + agentRef.current = new Agent(config.ABLY_KEY, CHANNEL_NAME); + } + const { channel } = useChannel(CHANNEL_NAME, (message: Message) => { const responseId = message.extras?.headers?.responseId; @@ -57,8 +63,7 @@ const AITransportDemo: React.FC = () => { const responseId = `request-${crypto.randomUUID()}`; currentResponseId.current = responseId; - const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); - agent.processPrompt('What is Ably AI Transport?', responseId); + agentRef.current?.processPrompt('What is Ably AI Transport?', responseId); }; const handleDisconnect = () => { From 6fa00a78b18ee7db43e7ce15485de5ce2d67555a Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Fri, 16 Jan 2026 14:53:12 +0000 Subject: [PATCH 55/59] docs: writing style guide Adds additional friendly guidance to the writing style guide to stop LLMs consuming it from falling back onto bad habits. --- writing-style-guide.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/writing-style-guide.md b/writing-style-guide.md index ee739e7c67..68054b1b2c 100644 --- a/writing-style-guide.md +++ b/writing-style-guide.md @@ -235,6 +235,7 @@ Note the following points: * The list should always have a piece of text introducing the list followed by a colon, and then a blank line. * Each sentence in the list is terminated by a full-stop (period). * If each item in the list is a single word, a terminating period is not required. 
+* Do not use bold formatting for prefixes in bullet points (for example, avoid patterns like "**Feature name:** description"), as this is a common indicator of AI-generated content. ## Codeblocks @@ -244,6 +245,8 @@ When inserting example code in the text: * Break the text before a codeblock with a colon, not a period (which is a hard stop in the mind of the reader, rather than a continuation). * There should *not* be a space before the colon. * Place a blank line after the colon and before the code block. +* All headings must be followed by introductory text. Never place a code block, list, or other content immediately after a heading without explanatory text first. +* For JavaScript and TypeScript code, prefer single quotes over double quotes for strings (excluding JSON, which must use double quotes per the specification). ## Acronyms @@ -274,6 +277,16 @@ Make sure you write the correct case for product names: * GitHub not Github * macOS not Mac OS +## Avoid AI-generated content fingerprints + +Technical documentation should maintain a natural, human writing style and avoid patterns commonly associated with AI-generated content: + +* Do not use em-dashes (—) in technical writing. Prefer standard hyphens (-) or restructure the sentence for better clarity. +* Avoid bold prefixes in bullet points (for example, patterns like "**Feature:** Description" or "**Benefits:** Details"). This formatting style is a telltale sign of AI-generated content. +* Avoid formulaic patterns and overly structured prose that may appear mechanical or template-driven. + +These guidelines help ensure documentation feels authentic and professionally written while maintaining readability and clarity. + ## Other considerations Some additional points to bear in mind: From 1847a6b1fd1ec28a4f573c15816845bbfa8c57c0 Mon Sep 17 00:00:00 2001 From: Mike Christensen Date: Fri, 16 Jan 2026 15:21:52 +0000 Subject: [PATCH 56/59] ait: misc improvements --- .../javascript/README.md | 2 +- .../javascript/README.md | 4 +- src/pages/docs/ai-transport/index.mdx | 22 ++++--- .../messaging/chain-of-thought.mdx | 16 ++--- .../docs/ai-transport/messaging/citations.mdx | 58 +++++++++---------- .../ai-transport/messaging/tool-calls.mdx | 6 +- .../identifying-users-and-agents.mdx | 2 +- .../ai-transport/sessions-identity/index.mdx | 2 +- .../sessions-identity/resuming-sessions.mdx | 4 +- .../ai-transport/token-streaming/index.mdx | 5 +- .../token-streaming/message-per-response.mdx | 16 ++--- .../token-streaming/message-per-token.mdx | 16 ++--- .../token-rate-limits.mdx | 6 +- .../anthropic-message-per-response.mdx | 6 +- .../anthropic-message-per-token.mdx | 10 ++-- .../openai-message-per-response.mdx | 6 +- .../ai-transport/openai-message-per-token.mdx | 26 ++++----- 17 files changed, 106 insertions(+), 101 deletions(-) rename src/pages/docs/ai-transport/{messaging => token-streaming}/token-rate-limits.mdx (83%) diff --git a/examples/ai-transport-message-per-response/javascript/README.md b/examples/ai-transport-message-per-response/javascript/README.md index c0e3f41b66..ebb5760df6 100644 --- a/examples/ai-transport-message-per-response/javascript/README.md +++ b/examples/ai-transport-message-per-response/javascript/README.md @@ -18,7 +18,7 @@ Use the following methods to implement AI Transport message-per-response streami - [`channel.subscribe()`](/docs/channels#subscribe): subscribes to messages, handling `message.create`, `message.append`, and `message.update` actions. 
- [`channel.setOptions()`](/docs/channels/options) with [`rewind`](/docs/channels/options/rewind): enables seamless message recovery during reconnections, delivering historical messages as `message.update` events. -Find out more about [AI Transport](/docs/ai-transport) and [message appending](/docs/ai-transport/features/token-streaming/message-per-response). +Find out more about [AI Transport](/docs/ai-transport) and [message appending](/docs/ai-transport/token-streaming/message-per-response). ## Getting started diff --git a/examples/ai-transport-message-per-token/javascript/README.md b/examples/ai-transport-message-per-token/javascript/README.md index 1a2ade8dec..ffed30477a 100644 --- a/examples/ai-transport-message-per-token/javascript/README.md +++ b/examples/ai-transport-message-per-token/javascript/README.md @@ -17,7 +17,7 @@ Use the following methods to implement AI Transport token streaming: - [`channel.publish()`](/docs/channels#publish): publishes individual tokens as they arrive from the LLM service with response tracking headers. - [`channel.history()`](/docs/channels/history) with [`untilAttach`](/docs/channels/options#attach): enables seamless message recovery during reconnections, ensuring no tokens are lost. -Find out more about [AI Transport](/docs/ai-transport) and [message history](/docs/channels/history). +Find out more about [AI Transport](/docs/ai-transport), [token streaming](/docs/ai-transport/token-streaming), and [message history](/docs/storage-history/history). ## Getting started @@ -57,4 +57,4 @@ Find out more about [AI Transport](/docs/ai-transport) and [message history](/do ## Open in CodeSandbox -In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key. \ No newline at end of file +In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key. diff --git a/src/pages/docs/ai-transport/index.mdx b/src/pages/docs/ai-transport/index.mdx index 0c906e9340..ceec10b83a 100644 --- a/src/pages/docs/ai-transport/index.mdx +++ b/src/pages/docs/ai-transport/index.mdx @@ -6,7 +6,7 @@ meta_description: "Learn more about Ably's AI Transport and the features that en AI Transport enables you to add a realtime delivery layer to your application, providing the infrastructure required to deliver modern, stateful AI experiences to users. It works seamlessly with any AI model or framework, such as OpenAI, Anthropic, Vercel or LangChain. -AI Transport runs on Ably's [fault-tolerant](/docs/platform/architecture/fault-tolerance) and highly-available platform. The platform enables data to be streamed between all internet-connected devices at [low latencies](/docs/platform/architecture/latency) across the globe. Its elastic global infrastructure delivers enterprise-scale messaging that [effortlessly scales](/docs/platform/architecture/platform-scalability) to meet demand. +AI Transport runs on Ably's [fault-tolerant](/docs/platform/architecture/fault-tolerance) and highly-available platform. The platform supports streaming data between all internet-connected devices at [low latencies](/docs/platform/architecture/latency) across the globe. Its elastic global infrastructure delivers enterprise-scale messaging that [effortlessly scales](/docs/platform/architecture/platform-scalability) to meet demand. Drop AI Transport into your applications to transform them into modern, bi-directional AI experiences that keep users engaged. 
AI Transport provides the building blocks to deliver reliable, resumable token streams with robust session management and state hydration to always keep your users and agents in sync. @@ -18,6 +18,8 @@ Start learning the basics of AI Transport right away with a getting started guid ### OpenAI +Use the following guides to get started with OpenAI: + {[ { @@ -37,6 +39,8 @@ Start learning the basics of AI Transport right away with a getting started guid ### Anthropic +Use the following guides to get started with Anthropic: + {[ { @@ -65,7 +69,7 @@ Token streaming is the core of how LLMs deliver their responses to users. Tokens Using AI Transport, your token streams are reliable and persistent. They survive modern environments where users change browser tabs, refresh the page or switch devices, and common interruptions such as temporary network loss. Your users can always reconnect and continue where they left off without having to start over. -[Read more about token streaming](/docs/ai-transport/features/token-streaming). +[Read more about token streaming](/docs/ai-transport/token-streaming). ### Bi-directional communication
@@ -73,9 +77,9 @@ AI Transport supports rich, bi-directional communication patterns between users Build sophisticated AI experiences with features like accepting user input for interactive conversations, streaming chain-of-thought reasoning for transparency, attaching citations to responses for verifiability, implementing human-in-the-loop workflows for sensitive operations, and exposing tool calls for generative UI and visibility. -These messaging features work seamlessly with token streaming to create complete, interactive AI experiences. +These messaging features work seamlessly with [token streaming](/docs/ai-transport/token-streaming) to create complete, interactive AI experiences. -[Read more about messaging features](/docs/ai-transport/features/messaging). +[Read more about messaging features](/docs/ai-transport/messaging/accepting-user-input). ### Durable sessions @@ -85,25 +89,25 @@ Communication shouldn't be tied to the connection state of either party. If a us Your users can start a conversation on their mobile and seamlessly continue it on their desktop. Similarly, multiple users can participate in the same conversation with a single agent and they will all remain in sync, in realtime. -[Read more about sessions and identity](/docs/ai-transport/features/sessions-identity). +[Read more about sessions and identity](/docs/ai-transport/sessions-identity). ### Automatic catch-up -AI Transport enables clients to hydrate conversation and session state from the channel, including message history and in-progress responses. +AI Transport enables clients to hydrate conversation and session state from the [channel](/docs/channels), including [message history](/docs/storage-history/history) and in-progress responses. Whether a user is briefly disconnected when they drive through a tunnel, or they're rejoining a conversation the following day of work, AI Transport allows clients to resynchronise the full conversation state, including both historical messages and in-progress responses. Your users are always up to date with the full conversation, in order, anywhere. -[Read more about client hydration](/docs/ai-transport/features/token-streaming/message-per-response#hydration). +[Read more about client hydration](/docs/ai-transport/token-streaming/message-per-response#hydration). ### Background processing AI Transport allows agents to process jobs in the background while users go offline, with full awareness of their online status through realtime presence tracking. -Users can work asynchronously by prompting an agent to perform a task without having to monitor its progress. They can go offline and receive a push notification when the agent has completed the task, or reconnect at any time to seamlessly resume and see all progress made while they were away using [state hydration](#hydration). +Users can work asynchronously by prompting an agent to perform a task without having to monitor its progress. They can go offline and receive a push notification when the agent has completed the task, or reconnect at any time to seamlessly resume and see all progress made while they were away using [state hydration](#catch-up). It also puts you in control of how you manage your application when there aren't any users online. For example, you can choose whether to pause a conversation when a user exits their browser tab, or allow the agent to complete its response for the user to view when they return. 
-[Read more about status-aware cost controls](/docs/ai-transport/features/sessions-identity/online-status). +[Read more about status-aware cost controls](/docs/ai-transport/sessions-identity/online-status). ### Enterprise controls diff --git a/src/pages/docs/ai-transport/messaging/chain-of-thought.mdx b/src/pages/docs/ai-transport/messaging/chain-of-thought.mdx index 7c76eecea3..15c40d0dc0 100644 --- a/src/pages/docs/ai-transport/messaging/chain-of-thought.mdx +++ b/src/pages/docs/ai-transport/messaging/chain-of-thought.mdx @@ -26,15 +26,15 @@ As an application developer, you decide how to surface chain-of-thought reasonin ### Inline pattern -In the inline pattern, reasoning messages are published to the same channel as model output messages. +In the inline pattern, agents publish reasoning messages to the same channel as model output messages. By publishing all content to a single channel, the inline pattern: - Simplifies channel management by consolidating all conversation content in one place -- Maintains relative order of reasoning and model output messages as they are generated +- Maintains relative order of reasoning and model output messages as the model generates them - Supports retrieving reasoning and response messages together from history -#### Publishing +#### Publish Publish both reasoning and model output messages to a single channel. @@ -86,7 +86,7 @@ To learn how to stream individual tokens as they are generated, see the [token s Set [`echoMessages`](/docs/api/realtime-sdk/types#client-options) to `false` on the agent's Ably client to prevent the agent from receiving its own reasoning and output messages, avoiding billing for [echoed messages](/docs/pub-sub/advanced#echo). -#### Subscribing +#### Subscribe Subscribe to both reasoning and model output messages on the same channel. @@ -140,7 +140,7 @@ To learn about hydrating responses from channel history, including using `rewind ### Threading pattern -In the threading pattern, reasoning messages are published to a separate channel from model output messages. The reasoning channel name is communicated to clients, allowing them to discover where to obtain reasoning content on demand. +In the threading pattern, agents publish reasoning messages to a separate channel from model output messages. The reasoning channel name is communicated to clients, allowing them to discover where to obtain reasoning content on demand. By separating reasoning into its own channel, the threading pattern: @@ -148,11 +148,11 @@ By separating reasoning into its own channel, the threading pattern: - Reduces bandwidth usage by delivering reasoning messages only when users choose to view them - Works well for long reasoning threads, where not all the detail needs to be immediately surfaced to the user, but is helpful to see on demand -#### Publishing +#### Publish Publish model output messages to the main conversation channel and reasoning messages to a dedicated reasoning channel. The reasoning channel name includes the response ID, creating a unique reasoning channel per response. -In the example below, a `start` control message is sent on the main channel at the beginning of each response, which includes the response ID in the message [`extras`](/docs/api/realtime-sdk/messages#extras). 
Clients can derive the reasoning channel name from the response ID, allowing them to discover and subscribe to the stream of reasoning messages on demand: +In the example below, the agent sends a `start` control message on the main channel at the beginning of each response, which includes the response ID in the message [`extras`](/docs/api/realtime-sdk/messages#extras). Clients can derive the reasoning channel name from the response ID, allowing them to discover and subscribe to the stream of reasoning messages on demand: -#### Subscribing +#### Subscribe Subscribe to the main conversation channel to receive control messages and model output. Subscribe to the reasoning channel on demand, for example in response to a click event. diff --git a/src/pages/docs/ai-transport/messaging/citations.mdx b/src/pages/docs/ai-transport/messaging/citations.mdx index 1944200959..53d473b0be 100644 --- a/src/pages/docs/ai-transport/messaging/citations.mdx +++ b/src/pages/docs/ai-transport/messaging/citations.mdx @@ -31,7 +31,7 @@ Use [message annotations](/docs/messages/annotations) to attach source metadata Message append functionality requires "Message annotations, updates, deletes and appends" to be enabled in a [channel rule](/docs/channels#rules) associated with the channel. To enable the channel rule: @@ -39,7 +39,7 @@ To enable the channel rule: 1. Go to the [Ably dashboard](https://www.ably.com/dashboard) and select your app. 2. Navigate to the "Configuration" > "Rules" section from the left-hand navigation bar. 3. Choose "Add new rule". -4. Enter a channel name or namespace pattern (e.g. `ai` for all channels starting with `ai:`). +4. Enter a channel name or namespace pattern (for example, `ai` for all channels starting with `ai:`). 5. Select the "Message annotations, updates, deletes and appends" option from the list. 6. Click "Create channel rule". @@ -94,49 +94,49 @@ In this example: Including character offsets in annotation data allows UIs to attach inline citation markers to specific portions of the response text. -## Publishing citations +## Publish citations Agents create citations by publishing [message annotations](/docs/messages/annotations) that reference the [`serial`](/docs/messages#properties) of the response message: ```javascript -const channel = realtime.channels.get("ai:{{RANDOM_CHANNEL_NAME}}"); +const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); // Publish the AI response message -const response = "The James Webb Space Telescope launched in December 2021 and its first images were released in July 2022."; -const { serials: [msgSerial] } = await channel.publish("response", response); +const response = 'The James Webb Space Telescope launched in December 2021 and its first images were released in July 2022.'; +const { serials: [msgSerial] } = await channel.publish('response', response); // Add citations by annotating the response message await channel.annotations.publish(msgSerial, { - type: "citations:multiple.v1", - name: "science.nasa.gov", + type: 'citations:multiple.v1', + name: 'science.nasa.gov', data: { - url: "https://science.nasa.gov/mission/webb/", - title: "James Webb Space Telescope - NASA Science", + url: 'https://science.nasa.gov/mission/webb/', + title: 'James Webb Space Telescope - NASA Science', startOffset: 43, endOffset: 56, - snippet: "Webb launched on Dec. 25th 2021" + snippet: 'Webb launched on Dec.
25th 2021' } }); await channel.annotations.publish(msgSerial, { - type: "citations:multiple.v1", - name: "en.wikipedia.org", + type: 'citations:multiple.v1', + name: 'en.wikipedia.org', data: { - url: "https://en.wikipedia.org/wiki/James_Webb_Space_Telescope", - title: "James Webb Space Telescope - Wikipedia", + url: 'https://en.wikipedia.org/wiki/James_Webb_Space_Telescope', + title: 'James Webb Space Telescope - Wikipedia', startOffset: 95, endOffset: 104, - snippet: "The telescope's first image was released to the public on 11 July 2022." + snippet: 'The telescope\'s first image was released to the public on 11 July 2022.' } }); ``` -## Subscribing to summaries +## Subscribe to summaries Clients can display a summary of the citations attached to a response by using [annotation summaries](/docs/messages/annotations#annotation-summaries). Clients receive realtime updates to annotation summaries automatically when subscribing to a channel, which are [delivered as messages](/docs/messages/annotations#subscribe) with an `action` of `message.summary`. When using [`multiple.v1`](/docs/messages/annotations#multiple) summarization, counts are grouped by the annotation `name`. @@ -160,13 +160,13 @@ In the example below, the `name` is set to the domain name of the citation sourc ```javascript -const channel = realtime.channels.get("ai:{{RANDOM_CHANNEL_NAME}}"); +const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}'); await channel.subscribe((message) => { - if (message.action === "message.summary") { - const citations = message.annotations.summary["citations:multiple.v1"]; + if (message.action === 'message.summary') { + const citations = message.annotations.summary['citations:multiple.v1']; if (citations) { - console.log("Citation summary:", citations); + console.log('Citation summary:', citations); } } }); @@ -208,19 +208,19 @@ When agents publish citations with a [`clientId`](/docs/auth/identified-clients) The `clipped` field indicates whether the summary was truncated due to size limits. This only occurs when a large number of clients with distinct `clientId`s publish annotations. See [large summaries](/docs/messages/annotations#large-summaries) for more information. -## Subscribing to individual citations +## Subscribe to individual citations To access the full citation data, subscribe to [individual annotation events](/docs/messages/annotations#individual-annotations): ```javascript -const channel = realtime.channels.get("ai:{{RANDOM_CHANNEL_NAME}}", { - modes: ["ANNOTATION_SUBSCRIBE"] +const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}', { + modes: ['ANNOTATION_SUBSCRIBE'] }); await channel.annotations.subscribe((annotation) => { - if (annotation.action === "annotation.create" && - annotation.type === "citations:multiple.v1") { + if (annotation.action === 'annotation.create' && + annotation.type === 'citations:multiple.v1') { const { url, title } = annotation.data; console.log(`Citation: ${title} (${url})`); // Output: Citation: James Webb Space Telescope - Wikipedia (https://en.wikipedia.org/wiki/James_Webb_Space_Telescope) @@ -254,7 +254,7 @@ Each annotation event includes the `messageSerial` of the response message it is Subscribe to individual annotation events when you need the full citation data updated in realtime, such as for rendering clickable source links or attaching inline citation markers to specific portions of the response text as citations arrive. 
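For example, a client can use the `startOffset` and `endOffset` values carried in each annotation's data to place numbered markers inline. The helper below is a hypothetical sketch of that rendering step, not part of the Ably API:

```javascript
// Hypothetical helper: insert numbered citation markers into the response
// text using the startOffset/endOffset values from citation annotation data.
function insertCitationMarkers(text, citations) {
  // Insert at the highest offset first so earlier offsets remain valid
  const ordered = [...citations].sort((a, b) => b.data.endOffset - a.data.endOffset);
  let result = text;
  ordered.forEach((citation, i) => {
    const marker = ` [${ordered.length - i}]`;
    result = result.slice(0, citation.data.endOffset) + marker + result.slice(citation.data.endOffset);
  });
  return result;
}
```

With the two annotations from the publishing example above, this places `[1]` after "December 2021" and `[2]` after "July 2022".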
-## Retrieving citations on demand +## Retrieve citations on demand Annotations can also be retrieved via the [REST API](/docs/api/rest-api#annotations-list) without maintaining a realtime subscription. diff --git a/src/pages/docs/ai-transport/messaging/tool-calls.mdx b/src/pages/docs/ai-transport/messaging/tool-calls.mdx index d987d2bb26..15de573ff2 100644 --- a/src/pages/docs/ai-transport/messaging/tool-calls.mdx +++ b/src/pages/docs/ai-transport/messaging/tool-calls.mdx @@ -24,7 +24,7 @@ Surfacing tool calls supports: - Human-in-the-loop workflows: Expose tool calls [resolved by humans](/docs/ai-transport/messaging/human-in-the-loop) where users can review and approve tool execution before it happens - Generative UI: Build dynamic, contextual UI components based on the structured tool data -## Publishing tool calls +## Publish tool calls Publish tool call and model output messages to the channel. @@ -98,7 +98,7 @@ To learn how to stream individual tokens as they are generated, see the [token s Set [`echoMessages`](/docs/api/realtime-sdk/types#client-options) to `false` on the agent's Ably client to prevent the agent from receiving its own tool call messages, avoiding billing for [echoed messages](/docs/pub-sub/advanced#echo). -## Subscribing to tool calls +## Subscribe to tool calls Subscribe to tool call and model output messages on the channel. @@ -188,7 +188,7 @@ await channel.subscribe((message) => { ## Client-side tools diff --git a/src/pages/docs/ai-transport/sessions-identity/identifying-users-and-agents.mdx b/src/pages/docs/ai-transport/sessions-identity/identifying-users-and-agents.mdx index 4614747840..f762d4ae87 100644 --- a/src/pages/docs/ai-transport/sessions-identity/identifying-users-and-agents.mdx +++ b/src/pages/docs/ai-transport/sessions-identity/identifying-users-and-agents.mdx @@ -317,7 +317,7 @@ await channel.subscribe((message) => { ## Adding roles and attributes diff --git a/src/pages/docs/ai-transport/sessions-identity/index.mdx b/src/pages/docs/ai-transport/sessions-identity/index.mdx index e1b9be5a05..828fdd6538 100644 --- a/src/pages/docs/ai-transport/sessions-identity/index.mdx +++ b/src/pages/docs/ai-transport/sessions-identity/index.mdx @@ -15,7 +15,7 @@ A session is an interaction between a user (or multiple users) and an AI agent w - Recover from interruptions: Experience connection drops, browser refreshes, or network instability without losing conversation progress - Collaborate in shared sessions: Multiple users can participate in the same conversation simultaneously and remain in sync -These capabilities represent a fundamental shift from traditional request/response AI experiences to continuous, resumable interactions that remain accessible across all user devices and locations. Sessions have a lifecycle: they begin when a user starts interacting with an agent, remain active while the interaction continues, and can persist even when users disconnect - enabling truly asynchronous AI workflows. +These capabilities represent a fundamental shift from traditional request/response AI experiences to continuous, resumable interactions that are accessible across all user devices and locations. Sessions have a lifecycle: they begin when a user starts interacting with an agent, remain active while the interaction continues, and can persist even when users disconnect - enabling truly asynchronous AI workflows. 
Managing this lifecycle in AI Transport's decoupled architecture involves detecting when users are present, deciding when to stop or continue agent work, and handling scenarios where users disconnect and return. diff --git a/src/pages/docs/ai-transport/sessions-identity/resuming-sessions.mdx b/src/pages/docs/ai-transport/sessions-identity/resuming-sessions.mdx index e5e4fbd159..56c99c93d4 100644 --- a/src/pages/docs/ai-transport/sessions-identity/resuming-sessions.mdx +++ b/src/pages/docs/ai-transport/sessions-identity/resuming-sessions.mdx @@ -37,9 +37,9 @@ For detailed examples of hydrating the token stream, see the token streaming doc When an agent restarts, it needs to resume from where it left off. This involves two distinct concerns: -1. **Recovering the agent's execution state**: The current step in the workflow, local variables, function call results, pending operations, and any other state needed to continue execution. This state is internal to the agent and typically not visible to users. +1. Recovering the agent's execution state: The current step in the workflow, local variables, function call results, pending operations, and any other state needed to continue execution. This state is internal to the agent and typically not visible to users. -2. **Catching up on session activity**: Any user messages, events, or other activity that occurred while the agent was offline. +2. Catching up on session activity: Any user messages, events, or other activity that occurred while the agent was offline. These are separate problems requiring different solutions. Agent execution state is handled by your application and you choose how to persist and restore the internal state your agent needs to resume. diff --git a/src/pages/docs/ai-transport/token-streaming/index.mdx b/src/pages/docs/ai-transport/token-streaming/index.mdx index b727224e71..9dfa3030b0 100644 --- a/src/pages/docs/ai-transport/token-streaming/index.mdx +++ b/src/pages/docs/ai-transport/token-streaming/index.mdx @@ -83,10 +83,11 @@ Example use cases: ## Message events -Different models and frameworks use different events to signal streaming state, for example start events, stop events, tool calls, and content deltas. When you publish a message to an Ably channel, you can set the [message name](/docs/messages#properties) to the event type your client expects, or encode the information in [message extras]((/docs/messages#properties)) or within the payload itself. This allows your frontend to handle each event type appropriately without parsing message content. +Different models and frameworks use different events to signal streaming state, for example start events, stop events, tool calls, and content deltas. When you publish a message to an Ably [channel](/docs/channels), you can set the [message name](/docs/messages#properties) to the event type your client expects, or encode the information in message [`extras`](/docs/messages#properties) or within the payload itself. This allows your frontend to handle each event type appropriately without parsing message content. 
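As an illustration, an agent might forward a provider's stream events one-to-one, using each event type as the message name. In this sketch, `modelStream` and the `event.type`/`event.data` shape are assumptions rather than part of any particular provider SDK:

```javascript
// A minimal sketch: forward provider stream events to Ably, using each event
// type as the message name. `modelStream` and the event shape are assumed.
const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}');

for await (const event of modelStream) {
  // Subscribers can branch on message.name ('start', 'delta', 'stop', ...)
  // without inspecting the payload
  await channel.publish(event.type, event.data);
}
```

Clients can then use name-based subscriptions, such as `channel.subscribe('delta', listener)`, to receive only the event types they care about.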
## Next steps - Implement token streaming with [message-per-response](/docs/ai-transport/token-streaming/message-per-response) (recommended for most applications) - Implement token streaming with [message-per-token](/docs/ai-transport/token-streaming/message-per-token) for sliding-window use cases -- Explore the guides for integration with specific models and frameworks +- Explore the [guides](/docs/guides/ai-transport/openai-message-per-response) for integration with specific models and frameworks +- Learn about [sessions and identity](/docs/ai-transport/sessions-identity) in AI Transport applications diff --git a/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx b/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx index 3442300f18..88d18426cc 100644 --- a/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx +++ b/src/pages/docs/ai-transport/token-streaming/message-per-response.mdx @@ -7,7 +7,7 @@ Token streaming with message-per-response is a pattern where every token generat This pattern is useful for chat-style applications where you want each complete AI response stored as a single message in history, making it easy to retrieve and display multi-response conversations. Each agent response becomes a single message that grows as tokens are appended, allowing clients joining mid-stream to catch up efficiently without processing thousands of individual tokens. -The message-per-response pattern includes [automatic rate limit protection](/docs/ai-transport/features/token-streaming/token-rate-limits#per-response) through rollups, making it the recommended approach for most token streaming use cases. +The message-per-response pattern includes [automatic rate limit protection](/docs/ai-transport/token-rate-limits#per-response) through rollups, making it the recommended approach for most token streaming use cases. ## How it works @@ -16,7 +16,7 @@ The message-per-response pattern includes [automatic rate limit protection](/doc 3. **Live delivery**: Clients subscribed to the channel receive each appended token in realtime, allowing them to progressively render the response. 4. **Compacted history**: The channel history contains only one message per agent response, which includes all tokens appended to it concatenated together. -You do not need to mark the message or token stream as completed; the final message content will automatically include the full response constructed from all appended tokens. +You do not need to mark the message or token stream as completed; the final message content automatically includes the full response constructed from all appended tokens.
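For example, a client joining after a response has completed can catch up from [history](/docs/storage-history/history), where each response comes back as one compacted message. A minimal sketch:

```javascript
// A minimal sketch of catching up from compacted history: with
// message-per-response, each agent response is returned as a single message
// whose data contains all appended tokens concatenated together.
const channel = realtime.channels.get('ai:{{RANDOM_CHANNEL_NAME}}');

const page = await channel.history({ limit: 25 });
for (const message of page.items) {
  console.log(message.serial, message.data); // one complete response per message
}
```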