Skip to content
45 changes: 45 additions & 0 deletions examples/eleven_v3_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
"""
Example demonstrating ElevenLabs eleven_v3 TTS model usage with LiveKit Agents.

The eleven_v3 model doesn't support WebSocket streaming, so the plugin automatically
uses HTTP streaming with chunked transfer encoding instead.

To compare latency between models:
- Set USE_V3 = True → Uses eleven_v3 with HTTP streaming (new)
- Set USE_V3 = False → Uses eleven_turbo_v2_5 with WebSocket (existing)

Look for the "tts_ttfb" metric in the console to compare TTS latency.
"""

from livekit.agents import AgentServer, JobContext, cli
from livekit.agents.voice import Agent, AgentSession
from livekit.plugins import elevenlabs, openai, silero

server = AgentServer()


@server.rtc_session()
async def entrypoint(ctx: JobContext):
    """Per-room entrypoint: build a voice agent and start a session in the room.

    The USE_V3 flag toggles between the two ElevenLabs models so their TTS
    latency (the "tts_ttfb" metric in the console) can be compared directly.
    """
    USE_V3 = True  # flip to False to benchmark eleven_turbo_v2_5 instead

    # eleven_v3 has no WebSocket support, so the plugin transparently falls
    # back to chunked HTTP streaming; eleven_turbo_v2_5 uses WebSocket.
    tts_model = "eleven_v3" if USE_V3 else "eleven_turbo_v2_5"

    agent = Agent(
        instructions="You are a helpful voice assistant. Keep responses very short - 1 sentence max.",
        stt=openai.STT(),  # OpenAI Whisper for speech-to-text
        llm=openai.LLM(model="gpt-4o"),
        tts=elevenlabs.TTS(
            model=tts_model,
            voice_id="EXAVITQu4vr4xnSDxMaL",
        ),
        vad=silero.VAD.load(),  # voice-activity detection for turn taking
    )

    session = AgentSession()
    await session.start(agent=agent, room=ctx.room)


if __name__ == "__main__":
cli.run_app(server)
10 changes: 10 additions & 0 deletions livekit-plugins/livekit-plugins-elevenlabs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,3 +13,13 @@ pip install livekit-plugins-elevenlabs
## Pre-requisites

You'll need an API key from ElevenLabs. It can be set as an environment variable: `ELEVEN_API_KEY`

## Supported Models

All ElevenLabs TTS models are supported, including:
- `eleven_v3` - Most expressive model with emotion and delivery control
- `eleven_turbo_v2_5` - Fast, high-quality multilingual model (default)
- `eleven_flash_v2_5`, `eleven_flash_v2` - Ultra-fast models
- And more...

**Note:** The `eleven_v3` model uses HTTP streaming instead of WebSocket for compatibility, as it doesn't support WebSocket connections. The plugin automatically handles this difference.
Loading