From cf4c09554b2b3c4e6a947a5cd324f5e80ef834b1 Mon Sep 17 00:00:00 2001
From: ops
Date: Sun, 1 Feb 2026 10:40:27 +0100
Subject: [PATCH 1/3] feat: token display in timeline/session and message
details
---
packages/app/src/pages/session.tsx | 76 ++++-
.../cmd/tui/routes/session/dialog-inspect.tsx | 236 +++++++++++++
.../tui/routes/session/dialog-timeline.tsx | 320 +++++++++++++++++-
.../src/cli/cmd/tui/routes/session/index.tsx | 297 ++++++++++++----
.../src/cli/cmd/tui/ui/dialog-select.tsx | 31 +-
.../opencode/src/cli/cmd/tui/ui/dialog.tsx | 24 +-
packages/opencode/src/provider/transform.ts | 53 ++-
packages/opencode/src/session/index.ts | 1 +
packages/opencode/src/session/llm.ts | 2 +-
packages/opencode/src/util/token.ts | 8 +-
.../opencode/test/provider/transform.test.ts | 128 +++----
packages/sdk/js/src/v2/gen/types.gen.ts | 1 +
packages/ui/src/components/message-part.tsx | 19 +-
packages/ui/src/components/session-turn.tsx | 4 +-
14 files changed, 1028 insertions(+), 172 deletions(-)
create mode 100644 packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx
diff --git a/packages/app/src/pages/session.tsx b/packages/app/src/pages/session.tsx
index d3e74072a86..29087bf4d9e 100644
--- a/packages/app/src/pages/session.tsx
+++ b/packages/app/src/pages/session.tsx
@@ -50,7 +50,7 @@ import { DialogFork } from "@/components/dialog-fork"
import { useCommand } from "@/context/command"
import { useLanguage } from "@/context/language"
import { useNavigate, useParams } from "@solidjs/router"
-import { UserMessage } from "@opencode-ai/sdk/v2"
+import { UserMessage, AssistantMessage } from "@opencode-ai/sdk/v2"
import type { FileDiff } from "@opencode-ai/sdk/v2/client"
import { useSDK } from "@/context/sdk"
import { usePrompt } from "@/context/prompt"
@@ -241,6 +241,8 @@ export default function Page() {
const comments = useComments()
const permission = usePermission()
+ const [pendingAssistantMessage, setPendingAssistantMessage] = createSignal<string | undefined>(undefined)
+
const request = createMemo(() => {
const sessionID = params.id
if (!sessionID) return
@@ -279,6 +281,7 @@ export default function Page() {
})
.finally(() => setUi("responding", false))
}
+
const sessionKey = createMemo(() => `${params.dir}${params.id ? "/" + params.id : ""}`)
const tabs = createMemo(() => layout.tabs(sessionKey))
const view = createMemo(() => layout.view(sessionKey))
@@ -1530,6 +1533,38 @@ export default function Page() {
updateHash(message.id)
}
+ const scrollToAnyMessage = (messageID: string, behavior: ScrollBehavior = "smooth") => {
+ const allMsgs = messages()
+ const message = allMsgs.find((m) => m.id === messageID)
+ if (!message) return
+
+ if (message.role === "user") {
+ scrollToMessage(message as UserMessage, behavior)
+ return
+ }
+
+ const assistantMsg = message as AssistantMessage
+ const parentUserMsg = userMessages().find((m) => m.id === assistantMsg.parentID)
+ if (!parentUserMsg) return
+
+ setStore("expanded", parentUserMsg.id, true)
+
+ requestAnimationFrame(() => {
+ const el = document.getElementById(anchor(messageID))
+ if (!el) {
+ requestAnimationFrame(() => {
+ const next = document.getElementById(anchor(messageID))
+ if (!next) return
+ scrollToElement(next, behavior)
+ })
+ return
+ }
+ scrollToElement(el, behavior)
+ })
+
+ updateHash(messageID)
+ }
+
const applyHash = (behavior: ScrollBehavior) => {
const hash = window.location.hash.slice(1)
if (!hash) {
@@ -1540,14 +1575,18 @@ export default function Page() {
const match = hash.match(/^message-(.+)$/)
if (match) {
autoScroll.pause()
- const msg = visibleUserMessages().find((m) => m.id === match[1])
- if (msg) {
- scrollToMessage(msg, behavior)
+ const msg = messages().find((m) => m.id === match[1])
+ if (!msg) {
+ if (visibleUserMessages().find((m) => m.id === match[1])) return
return
}
- // If we have a message hash but the message isn't loaded/rendered yet,
- // don't fall back to "bottom". We'll retry once messages arrive.
+ if (msg.role === "assistant") {
+ setPendingAssistantMessage(match[1])
+ return
+ }
+
+ scrollToMessage(msg as UserMessage, behavior)
return
}
@@ -1642,7 +1681,10 @@ export default function Page() {
const hash = window.location.hash.slice(1)
const match = hash.match(/^message-(.+)$/)
if (!match) return undefined
- return match[1]
+ const hashId = match[1]
+ const msg = messages().find((m) => m.id === hashId)
+ if (msg && msg.role === "assistant") return undefined
+ return hashId
})()
if (!targetId) return
if (store.messageId === targetId) return
@@ -1654,6 +1696,26 @@ export default function Page() {
requestAnimationFrame(() => scrollToMessage(msg, "auto"))
})
+ // Handle pending assistant message navigation
+ createEffect(() => {
+ const sessionID = params.id
+ const ready = messagesReady()
+ if (!sessionID || !ready) return
+
+ // dependencies
+ messages().length
+ store.turnStart
+
+ const targetId = pendingAssistantMessage()
+ if (!targetId) return
+ if (store.messageId === targetId) return
+
+ const msg = messages().find((m) => m.id === targetId)
+ if (!msg) return
+ if (pendingAssistantMessage() === targetId) setPendingAssistantMessage(undefined)
+ requestAnimationFrame(() => scrollToAnyMessage(targetId, "auto"))
+ })
+
createEffect(() => {
const sessionID = params.id
const ready = messagesReady()
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx
new file mode 100644
index 00000000000..7b8031a156f
--- /dev/null
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx
@@ -0,0 +1,236 @@
+import { TextAttributes, ScrollBoxRenderable } from "@opentui/core"
+import { useKeyboard } from "@opentui/solid"
+import { useDialog } from "../../ui/dialog"
+import { useTheme } from "@tui/context/theme"
+import type { Part, AssistantMessage } from "@opencode-ai/sdk/v2"
+import { useSync } from "@tui/context/sync"
+import { Clipboard } from "../../util/clipboard"
+import { useToast } from "../../ui/toast"
+import { createSignal, For, Show } from "solid-js"
+import { Token } from "@/util/token"
+
+interface DialogInspectProps {
+ message: AssistantMessage | any
+ parts: Part[]
+}
+
+const PartCard = (props: { title: string; children: any; theme: any }) => (
+
+
+ {props.title}
+
+ {props.children}
+
+)
+
+export function DialogInspect(props: DialogInspectProps) {
+ const sync = useSync()
+ const { theme, syntax } = useTheme()
+ const dialog = useDialog()
+ const toast = useToast()
+ const msg = () => sync.data.message[props.message.sessionID]?.find((m) => m.id === props.message.id)
+ const parts = () => sync.data.part[props.message.id] ?? props.parts
+
+ const [showRaw, setShowRaw] = createSignal(true)
+ dialog.setSize("xlarge")
+
+ let scrollRef: ScrollBoxRenderable | undefined
+
+ const copy = () =>
+ Clipboard.copy(JSON.stringify(props.parts, null, 2))
+ .then(() => toast.show({ message: "Copied", variant: "success" }))
+ .catch(() => toast.show({ message: "Failed", variant: "error" }))
+
+ const toggleRaw = () => setShowRaw((p) => !p)
+
+ useKeyboard((evt) => {
+ const h = {
+ down: () => scrollRef?.scrollBy(1),
+ up: () => scrollRef?.scrollBy(-1),
+ pagedown: () => scrollRef?.scrollBy(scrollRef?.height ?? 20),
+ pageup: () => scrollRef?.scrollBy(-(scrollRef?.height ?? 20)),
+ }
+ const k: Record<string, () => void> = {
+ c: copy,
+ s: toggleRaw,
+ down: h.down,
+ up: h.up,
+ pagedown: h.pagedown,
+ pageup: h.pageup,
+ }
+ if (k[evt.name]) {
+ evt.preventDefault()
+ k[evt.name]()
+ }
+ })
+
+ const toolEstimate = () => {
+ const p = parts()
+ let sum = 0
+ for (const part of p) {
+ if (part.type === "tool") {
+ const state = (part as any).state
+ if (state?.output) {
+ const output = typeof state.output === "string" ? state.output : JSON.stringify(state.output)
+ sum += Token.estimate(output)
+ }
+ }
+ }
+ return sum
+ }
+
+ const tokenFields =
+ msg()?.role === "assistant"
+ ? {
+ line1: [
+ { l: "Input", v: (msg() as any).tokens?.input },
+ { l: "Output", v: (msg() as any).tokens?.output },
+ { l: "Reasoning", v: (msg() as any).tokens?.reasoning },
+ { l: "Tool", v: toolEstimate(), estimated: true },
+ ],
+ line2: [
+ { l: "Cache Write", v: (msg() as any).tokens?.cache?.write },
+ { l: "Cache Read", v: (msg() as any).tokens?.cache?.read },
+ ],
+ }
+ : null
+
+ const tokenTotal = tokenFields ? [...tokenFields.line1, ...tokenFields.line2].reduce((s, f) => s + (f.v || 0), 0) : 0
+
+ const renderPart = (part: Part) => {
+ if (part.type === "text")
+ return (
+
+ {part.text}
+
+ )
+ if (part.type === "patch")
+ return (
+
+ Updated: {part.files?.join(", ")}
+
+ )
+ if (part.type === "tool")
+ return (
+
+ Input: {JSON.stringify(part.state?.input)}
+
+ {JSON.stringify((part.state as any).output)}
+
+
+ {(part.state as any).error}
+
+
+ )
+ if (part.type === "file")
+ return (
+
+
+ {part.filename} ({part.mime})
+
+
+ )
+ return (
+
+
+
+ )
+ }
+
+ return (
+
+
+
+ Inspection ({props.message.id})
+
+ dialog.clear()}>
+ [esc]
+
+
+
+
+
+
+
+ {(f) => (
+
+ {f.l}:{" "}
+
+ {f.estimated && f.v ? "~" : ""}
+ {(f.v || 0).toLocaleString()}
+
+
+ )}
+
+
+
+
+ {(f) => (
+
+ {f.l}: {(f.v || 0).toLocaleString()}
+
+ )}
+
+
+
+ Total: ~{tokenTotal.toLocaleString()} tokens
+
+
+
+
+ (scrollRef = r)}
+ flexGrow={1}
+ border={["bottom", "top"]}
+ borderColor={theme.borderSubtle}
+ >
+
+ }
+ >
+
+ !["step-start", "step-finish", "reasoning"].includes(p.type))}>
+ {(p) => renderPart(p)}
+
+
+
+
+
+
+
+ ↑↓ PgUp/Dn
+ S raw
+ C copy
+
+
+
+ {showRaw() ? "Parsed" : "Raw"}
+
+
+ Copy
+
+
+
+
+ )
+}
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx
index 87248a6a8ba..b164c218ae3 100644
--- a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx
@@ -1,11 +1,112 @@
import { createMemo, onMount } from "solid-js"
import { useSync } from "@tui/context/sync"
-import { DialogSelect, type DialogSelectOption } from "@tui/ui/dialog-select"
-import type { TextPart } from "@opencode-ai/sdk/v2"
+import { DialogSelect, type DialogSelectOption, type DialogSelectRef } from "@tui/ui/dialog-select"
+import type { Part, Message, AssistantMessage, ToolPart, FilePart } from "@opencode-ai/sdk/v2"
import { Locale } from "@/util/locale"
import { DialogMessage } from "./dialog-message"
+import { DialogInspect } from "./dialog-inspect"
import { useDialog } from "../../ui/dialog"
import type { PromptInfo } from "../../component/prompt/history"
+import { Token } from "@/util/token"
+import { useTheme } from "@tui/context/theme"
+import fs from "fs"
+import path from "path"
+import { produce } from "solid-js/store"
+import { Binary } from "@opencode-ai/util/binary"
+import { Global } from "@/global"
+import { useToast } from "../../ui/toast"
+
+// Module-level variable to store the selected message when opening details
+let timelineSelection: string | undefined
+
+function formatTokenCount(tokens: number): string {
+ return tokens.toString().padStart(8)
+}
+
+function getMessageTokens(message: Message, parts: Part[], isCompaction: boolean = false): number {
+ if (message.role === "assistant") {
+ const assistantMsg = message as AssistantMessage
+ let total = 0
+
+ // Tokens used by this message = input + output + reasoning + cache writes (PURE API DATA)
+ if (assistantMsg.tokens) {
+ const input = assistantMsg.tokens.input || 0
+ const output = assistantMsg.tokens.output || 0
+ const cacheWrite = assistantMsg.tokens.cache?.write || 0
+ const reasoning = assistantMsg.tokens.reasoning || 0
+ total = input + output + cacheWrite + reasoning
+ } else {
+ // Fall back to aggregating from step-finish parts
+ for (const part of parts) {
+ if (part.type === "step-finish" && (part as any).tokens) {
+ const tokens = (part as any).tokens
+ total += tokens.input + tokens.output + (tokens.reasoning || 0)
+ }
+ }
+ }
+
+ // NO LONGER add tool output tokens to the total
+ return total
+ }
+
+ // User message - estimate from parts
+ let estimate = 0
+ for (const part of parts) {
+ if (part.type === "text" && !part.synthetic && !part.ignored) {
+ estimate += Token.estimate(part.text)
+ }
+ if (part.type === "file") {
+ const filePart = part as FilePart
+ if (filePart.source?.text?.value) {
+ estimate += Token.estimate(filePart.source.text.value)
+ } else if (filePart.mime.startsWith("image/")) {
+ estimate += Token.estimateImage(filePart.url)
+ }
+ }
+ }
+ return estimate
+}
+
+function getToolOutputEstimate(parts: Part[]): number {
+ let estimate = 0
+ for (const part of parts) {
+ if (part.type === "tool") {
+ const toolPart = part as ToolPart
+ const state = toolPart.state as any
+ if (state?.output) {
+ const output = typeof state.output === "string" ? state.output : JSON.stringify(state.output)
+ estimate += Token.estimate(output)
+ }
+ }
+ }
+ return estimate
+}
+
+function getMessageSummary(parts: Part[]): string {
+ const textPart = parts.find((x) => x.type === "text" && !x.synthetic && !x.ignored)
+ if (textPart && textPart.type === "text") {
+ return textPart.text.replace(/\n/g, " ").trim()
+ }
+
+ const toolParts = parts.filter((x) => x.type === "tool") as ToolPart[]
+ if (toolParts.length > 0) {
+ const tools = toolParts.map((p) => p.tool).join(", ")
+ return `[${tools}]`
+ }
+
+ const reasoningParts = parts.filter((x) => x.type === "reasoning")
+ if (reasoningParts.length > 0) {
+ return "[thinking]"
+ }
+
+ const fileParts = parts.filter((x) => x.type === "file") as FilePart[]
+ if (fileParts.length > 0) {
+ const files = fileParts.map((p) => p.filename || "file").join(", ")
+ return `[files: ${files}]`
+ }
+
+ return "[no content]"
+}
export function DialogTimeline(props: {
sessionID: string
@@ -14,24 +115,100 @@ export function DialogTimeline(props: {
}) {
const sync = useSync()
const dialog = useDialog()
+ const { theme } = useTheme()
+ const toast = useToast()
+
+ // Capture the stored selection and clear it
+ const initialSelection = timelineSelection
+ timelineSelection = undefined
+
+ let selectRef: DialogSelectRef | undefined
onMount(() => {
dialog.setSize("large")
+
+ // Restore selection after mount if we have one
+ if (initialSelection && selectRef) {
+ setTimeout(() => {
+ selectRef?.moveToValue(initialSelection)
+ }, 0)
+ }
})
const options = createMemo((): DialogSelectOption[] => {
const messages = sync.data.message[props.sessionID] ?? []
const result = [] as DialogSelectOption[]
for (const message of messages) {
- if (message.role !== "user") continue
- const part = (sync.data.part[message.id] ?? []).find(
- (x) => x.type === "text" && !x.synthetic && !x.ignored,
- ) as TextPart
- if (!part) continue
+ const parts = sync.data.part[message.id] ?? []
+
+ // Check if this is a compaction summary message
+ const isCompactionSummary = message.role === "assistant" && (message as AssistantMessage).summary === true
+
+ // Get the token count for this specific message (delta only, not cumulative)
+ const messageTokens = getMessageTokens(message, parts, isCompactionSummary)
+
+ // Add tool estimation for assistant messages
+ const toolEstimate = message.role === "assistant" ? getToolOutputEstimate(parts) : 0
+ const delta = messageTokens + toolEstimate
+
+ // Format with ~ included in padding if needed
+ const hasEstimate = toolEstimate > 0
+ const formatted = hasEstimate ? ("~" + delta.toString()).padStart(8) : formatTokenCount(delta)
+
+ // Token count color based on thresholds (cold to hot gradient)
+ // Using delta for color coding
+ let tokenColor = theme.textMuted // grey < 1k
+ if (delta >= 20000) {
+ tokenColor = theme.error // red 20k+
+ } else if (delta >= 10000) {
+ tokenColor = theme.warning // orange 10k+
+ } else if (delta >= 5000) {
+ tokenColor = theme.accent // purple 5k+
+ } else if (delta >= 2000) {
+ tokenColor = theme.secondary // blue 2k+
+ } else if (delta >= 1000) {
+ tokenColor = theme.info // cyan 1k+
+ }
+
+ const summary = getMessageSummary(parts)
+
+ // Skip messages with no content
+ if (summary === "[no content]") continue
+
+ // Debug: Extract token breakdown for assistant messages
+ let tokenDebug = ""
+ if (message.role === "assistant") {
+ const assistantMsg = message as AssistantMessage
+ if (assistantMsg.tokens) {
+ const input = assistantMsg.tokens.input || 0
+ const output = assistantMsg.tokens.output || 0
+ const reasoning = assistantMsg.tokens.reasoning || 0
+ const cacheWrite = assistantMsg.tokens.cache?.write || 0
+ const cacheRead = assistantMsg.tokens.cache?.read || 0
+ const toolEstimate = getToolOutputEstimate(parts)
+ tokenDebug = `(${input}/${output}/${reasoning}/${cacheWrite}/${cacheRead}${toolEstimate > 0 ? `/~${toolEstimate}` : ""}) `
+ }
+ }
+
+ const prefix = isCompactionSummary ? "[compaction] " : message.role === "assistant" ? "agent: " : ""
+ const title = tokenDebug + prefix + summary
+
+ // Add ~ prefix for user messages (estimates only), keeping same width
+ const isUser = message.role === "user"
+ const tokenDisplay = isUser ? ("~" + delta.toString()).padStart(8) : formatted
+ const gutter = [{tokenDisplay}]
+
+ // Normal assistant messages use textMuted for title
+ const isAssistant = message.role === "assistant" && !isCompactionSummary
+
result.push({
- title: part.text.replace(/\n/g, " "),
+ title,
+ gutter: isCompactionSummary ? [{tokenDisplay}] : gutter,
value: message.id,
footer: Locale.time(message.time.created),
+ titleColor: isCompactionSummary ? theme.success : isAssistant ? theme.textMuted : undefined,
+ footerColor: isCompactionSummary ? theme.success : undefined,
+ bg: isCompactionSummary ? theme.success : undefined,
onSelect: (dialog) => {
dialog.replace(() => (
@@ -43,5 +220,130 @@ export function DialogTimeline(props: {
return result
})
- return <DialogSelect onMove={(option) => props.onMove(option.value)} title="Timeline" options={options()} />
+ const handleDelete = async (messageID: string) => {
+ try {
+ const storageBase = path.join(Global.Path.data, "storage")
+
+ // Delete message file
+ const messagePath = path.join(storageBase, "message", props.sessionID, `${messageID}.json`)
+ if (fs.existsSync(messagePath)) {
+ fs.unlinkSync(messagePath)
+ }
+
+ // Delete all part files
+ const partsDir = path.join(storageBase, "part", messageID)
+ if (fs.existsSync(partsDir)) {
+ const partFiles = fs.readdirSync(partsDir)
+ for (const file of partFiles) {
+ fs.unlinkSync(path.join(partsDir, file))
+ }
+ fs.rmdirSync(partsDir)
+ }
+
+ // Invalidate session cache by setting the flag in storage
+ const sessionPath = path.join(
+ storageBase,
+ "session",
+ "project_" + (sync.data.session.find((s) => s.id === props.sessionID)?.projectID ?? ""),
+ `${props.sessionID}.json`,
+ )
+ if (fs.existsSync(sessionPath)) {
+ const sessionData = JSON.parse(fs.readFileSync(sessionPath, "utf-8"))
+ sessionData.cacheInvalidated = true
+ fs.writeFileSync(sessionPath, JSON.stringify(sessionData, null, 2))
+ }
+
+ // Update the UI store to remove the message
+ const messages = sync.data.message[props.sessionID] ?? []
+ const result = Binary.search(messages, messageID, (m) => m.id)
+ if (result.found) {
+ sync.set(
+ "message",
+ props.sessionID,
+ produce((draft) => {
+ draft.splice(result.index, 1)
+ }),
+ )
+ }
+
+ // Also remove parts from UI
+ sync.set("part", messageID, [])
+
+ // Update session in UI store to reflect cache invalidation
+ const sessionIndex = sync.data.session.findIndex((s) => s.id === props.sessionID)
+ if (sessionIndex >= 0) {
+ sync.set("session", sessionIndex, "cacheInvalidated", true)
+ }
+
+ toast.show({ message: "Message deleted successfully", variant: "success" })
+ } catch (error) {
+ const message = error instanceof Error ? error.message : "Failed to delete message"
+ toast.show({ message, variant: "error" })
+ }
+ }
+
+ return (
+ {
+ selectRef = r
+ }}
+ onMove={(option) => props.onMove(option.value)}
+ title="Timeline|(input/output/reason/write/read/tool)"
+ options={options()}
+ keybind={[
+ {
+ keybind: { name: "n", ctrl: false, meta: true, shift: false, leader: false },
+ title: "Next user",
+ onTrigger: (option) => {
+ const currentIdx = options().findIndex((opt) => opt.value === option.value)
+ for (let i = currentIdx + 1; i < options().length; i++) {
+ const msgID = options()[i].value
+ const msg = sync.data.message[props.sessionID]?.find((m) => m.id === msgID)
+ if (msg && msg.role === "user") {
+ selectRef?.moveToValue(msgID)
+ break
+ }
+ }
+ },
+ },
+ {
+ keybind: { name: "p", ctrl: false, meta: true, shift: false, leader: false },
+ title: "Previous user",
+ onTrigger: (option) => {
+ const currentIdx = options().findIndex((opt) => opt.value === option.value)
+ for (let i = currentIdx - 1; i >= 0; i--) {
+ const msgID = options()[i].value
+ const msg = sync.data.message[props.sessionID]?.find((m) => m.id === msgID)
+ if (msg && msg.role === "user") {
+ selectRef?.moveToValue(msgID)
+ break
+ }
+ }
+ },
+ },
+ {
+ keybind: { name: "delete", ctrl: false, meta: false, shift: false, leader: false },
+ title: "Delete",
+ onTrigger: (option) => {
+ handleDelete(option.value)
+ },
+ },
+ {
+ keybind: { name: "insert", ctrl: false, meta: false, shift: false, leader: false },
+ title: "Details",
+ onTrigger: (option) => {
+ const messageID = option.value
+ const message = sync.data.message[props.sessionID]?.find((m) => m.id === messageID)
+ const parts = sync.data.part[messageID] ?? []
+
+ if (message && message.role === "assistant") {
+ // Store the current selection before opening details
+ timelineSelection = messageID
+ dialog.push(() => )
+ }
+ },
+ },
+ ]}
+ />
+ )
}
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
index 209469bad82..5e10df54d8b 100644
--- a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
@@ -30,6 +30,8 @@ import { Prompt, type PromptRef } from "@tui/component/prompt"
import type { AssistantMessage, Part, ToolPart, UserMessage, TextPart, ReasoningPart } from "@opencode-ai/sdk/v2"
import { useLocal } from "@tui/context/local"
import { Locale } from "@/util/locale"
+import { Log } from "@/util/log"
+import { Token } from "@/util/token"
import type { Tool } from "@/tool/tool"
import type { ReadTool } from "@/tool/read"
import type { WriteTool } from "@/tool/write"
@@ -57,6 +59,7 @@ import type { PromptInfo } from "../../component/prompt/history"
import { DialogConfirm } from "@tui/ui/dialog-confirm"
import { DialogTimeline } from "./dialog-timeline"
import { DialogForkFromTimeline } from "./dialog-fork-from-timeline"
+import { DialogInspect } from "./dialog-inspect"
import { DialogSessionRename } from "../../component/dialog-session-rename"
import { Sidebar } from "./sidebar"
import { Flag } from "@/flag/flag"
@@ -1053,6 +1056,11 @@ export function Session() {
last={lastAssistant()?.id === message.id}
message={message as AssistantMessage}
parts={sync.data.part[message.id] ?? []}
+ next={
+ messages()
+ .slice(index() + 1)
+ .find((x) => x.role === "assistant") as AssistantMessage | undefined
+ }
/>
@@ -1130,77 +1138,115 @@ function UserMessage(props: {
}) {
const ctx = use()
const local = useLocal()
- const text = createMemo(() => props.parts.flatMap((x) => (x.type === "text" && !x.synthetic ? [x] : []))[0])
- const files = createMemo(() => props.parts.flatMap((x) => (x.type === "file" ? [x] : [])))
const sync = useSync()
const { theme } = useTheme()
+ const dialog = useDialog()
const [hover, setHover] = createSignal(false)
+ const [tokenHover, setTokenHover] = createSignal(false)
+
+ const liveParts = createMemo(() => sync.data.part[props.message.id] ?? props.parts)
+ const text = createMemo(() => liveParts().flatMap((x) => (x.type === "text" && !x.synthetic ? [x] : []))[0])
+ const files = createMemo(() => liveParts().flatMap((x) => (x.type === "file" ? [x] : [])))
+
const queued = createMemo(() => props.pending && props.message.id > props.pending)
const color = createMemo(() => (queued() ? theme.accent : local.agent.color(props.message.agent)))
const metadataVisible = createMemo(() => queued() || ctx.showTimestamps())
- const compaction = createMemo(() => props.parts.find((x) => x.type === "compaction"))
+ const compaction = createMemo(() => liveParts().find((x) => x.type === "compaction"))
+
+ const tokenTotal = createMemo(() => {
+ const parts = liveParts()
+ let estimate = 0
+ for (const part of parts) {
+ if (part.type === "text" && !part.synthetic && !part.ignored) {
+ estimate += Token.estimate(part.text)
+ }
+ if (part.type === "file") {
+ const filePart = part as any
+ if (filePart.source?.text?.value) {
+ estimate += Token.estimate(filePart.source.text.value)
+ } else if (filePart.mime.startsWith("image/")) {
+ estimate += Token.estimateImage(filePart.url)
+ }
+ }
+ }
+ return estimate
+ })
return (
<>
-
- {
- setHover(true)
- }}
- onMouseOut={() => {
- setHover(false)
- }}
- onMouseUp={props.onMouseUp}
- paddingTop={1}
- paddingBottom={1}
- paddingLeft={2}
- backgroundColor={hover() ? theme.backgroundElement : theme.backgroundPanel}
- flexShrink={0}
- >
- {text()?.text}
-
-
-
- {(file) => {
- const bg = createMemo(() => {
- if (file.mime.startsWith("image/")) return theme.accent
- if (file.mime === "application/pdf") return theme.primary
- return theme.secondary
- })
- return (
-
- {MIME_BADGE[file.mime] ?? file.mime}
- {file.filename}
-
- )
- }}
-
-
-
-
-
-
- {Locale.todayTimeOrDateTime(props.message.time.created)}
-
-
-
- }
+
+
+ {
+ setHover(true)
+ }}
+ onMouseOut={() => {
+ setHover(false)
+ }}
+ onMouseUp={props.onMouseUp}
+ paddingTop={1}
+ paddingBottom={1}
+ paddingLeft={2}
+ backgroundColor={hover() ? theme.backgroundElement : theme.backgroundPanel}
+ flexShrink={0}
+ >
+ {text()?.text}
+
+
+
+ {(file) => {
+ const bg = createMemo(() => {
+ if (file.mime.startsWith("image/")) return theme.accent
+ if (file.mime === "application/pdf") return theme.primary
+ return theme.secondary
+ })
+ return (
+
+ {MIME_BADGE[file.mime] ?? file.mime}
+ {file.filename}
+
+ )
+ }}
+
+
+
+
+
+
+ {Locale.todayTimeOrDateTime(props.message.time.created)}
+
+
+
+ }
+ >
+
+ QUEUED
+
+
+
+
+
+ setTokenHover(true)}
+ onMouseOut={() => setTokenHover(false)}
+ onMouseUp={() => dialog.replace(() => )}
+ backgroundColor={tokenHover() ? theme.backgroundElement : undefined}
>
-
- QUEUED
+
+ ~{tokenTotal().toLocaleString()}
-
+
@@ -1217,11 +1263,30 @@ function UserMessage(props: {
)
}
-function AssistantMessage(props: { message: AssistantMessage; parts: Part[]; last: boolean }) {
+function AssistantMessage(props: { message: AssistantMessage; parts: Part[]; last: boolean; next?: AssistantMessage }) {
const local = useLocal()
const { theme } = useTheme()
+ const ctx = use()
const sync = useSync()
+ const log = Log.create({ service: "session" })
const messages = createMemo(() => sync.data.message[props.message.sessionID] ?? [])
+ const liveMessage = createMemo(
+ () => messages().find((x) => x.id === props.message.id) as AssistantMessage | undefined,
+ )
+ const liveParts = createMemo(() => sync.data.part[props.message.id] ?? props.parts)
+ const isVisiblePart = (part: Part) => {
+ if (part.type === "text") return part.text.trim().length > 0
+ if (part.type === "reasoning") return ctx.showThinking() && part.text.replace("[REDACTED]", "").trim().length > 0
+ if (part.type === "tool") {
+ if (ctx.showDetails()) return true
+ return part.state.status !== "completed"
+ }
+ return false
+ }
+ const visibleParts = createMemo(() => liveParts().some((part) => isVisiblePart(part)))
+ const dialog = useDialog()
+ const [hover, setHover] = createSignal(false)
+ const isLatest = createMemo(() => messages().at(-1)?.id === props.message.id)
const final = createMemo(() => {
return props.message.finish && !["tool-calls", "unknown"].includes(props.message.finish)
@@ -1235,23 +1300,115 @@ function AssistantMessage(props: { message: AssistantMessage; parts: Part[]; las
return props.message.time.completed - user.time.created
})
+ const tokenTotal = createMemo(() => {
+ const message = liveMessage()
+ if (!message) return 0
+ const parts = liveParts()
+ const base = message.tokens
+ ? (message.tokens.input || 0) +
+ (message.tokens.output || 0) +
+ (message.tokens.reasoning || 0) +
+ (message.tokens.cache?.write || 0)
+ : parts.reduce((sum, part) => {
+ if (part.type !== "step-finish" || !(part as any).tokens) return sum
+ const tokens = (part as any).tokens
+ return sum + tokens.input + tokens.output + (tokens.reasoning || 0)
+ }, 0)
+ const tools = parts.reduce((sum, part) => {
+ if (part.type !== "tool") return sum
+ const state = (part as ToolPart).state as any
+ if (!state?.output) return sum
+ const output = typeof state.output === "string" ? state.output : JSON.stringify(state.output)
+ return sum + Token.estimate(output)
+ }, 0)
+ return base + tools
+ })
+
+ const cacheRead = createMemo(() => liveMessage()?.tokens?.cache?.read ?? 0)
+
+ const showActual = createMemo(() => {
+ if (tokenTotal() <= 0) return false
+ if (final()) return true
+ return props.message.finish === "tool-calls"
+ })
+
+ const estimatedTokens = createMemo(() => {
+ const parts = liveParts()
+ return parts.reduce((sum, part) => {
+ if (part.type === "text" && part.text.trim()) {
+ return sum + Token.estimate(part.text)
+ }
+ if (part.type === "reasoning" && part.text.trim()) {
+ const content = part.text.replace("[REDACTED]", "").trim()
+ if (content) return sum + Token.estimate(content)
+ }
+ if (part.type === "tool") {
+ const state = (part as ToolPart).state as any
+ if (state?.output) {
+ const output = typeof state.output === "string" ? state.output : JSON.stringify(state.output)
+ return sum + Token.estimate(output)
+ }
+ }
+ return sum
+ }, 0)
+ })
+
+ createEffect(() => {
+ if (!isLatest()) return
+ log.info("assistant.tokens", {
+ id: props.message.id,
+ parts: liveParts().length,
+ tokens: tokenTotal(),
+ cacheRead: cacheRead(),
+ estimated: estimatedTokens(),
+ })
+ })
+
return (
- <>
-
+
+
{(part, index) => {
const component = createMemo(() => PART_MAPPING[part.type as keyof typeof PART_MAPPING])
+ const isLast = createMemo(() => index() === liveParts().length - 1)
return (
-
-
-
+ <>
+
+
+
+
+
+ setHover(true)}
+ onMouseOut={() => setHover(false)}
+ onMouseUp={() =>
+ dialog.replace(() => )
+ }
+ backgroundColor={hover() ? theme.backgroundElement : undefined}
+ >
+
+ ~{estimatedTokens().toLocaleString()}
+
+ }
+ >
+
+
+ {props.message.finish === "tool-calls" ? "~" : ""}
+ {tokenTotal().toLocaleString()}
+
+ ({cacheRead().toLocaleString()})
+
+
+
+
+
+ >
)
}}
+
- >
+
)
}
diff --git a/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx b/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx
index 56d8453c937..e9914bcef97 100644
--- a/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx
+++ b/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx
@@ -38,12 +38,15 @@ export interface DialogSelectOption {
disabled?: boolean
bg?: RGBA
gutter?: JSX.Element
- onSelect?: (ctx: DialogContext) => void
+ titleColor?: RGBA
+ footerColor?: RGBA
+ onSelect?: (ctx: DialogContext, trigger?: "prompt") => void
}
export type DialogSelectRef = {
filter: string
filtered: DialogSelectOption[]
+ moveToValue: (value: T) => void
}
export function DialogSelect(props: DialogSelectProps) {
@@ -214,17 +217,31 @@ export function DialogSelect(props: DialogSelectProps) {
get filtered() {
return filtered()
},
+ moveToValue(value: T) {
+ const index = flat().findIndex((opt) => isDeepEqual(opt.value, value))
+ if (index >= 0) {
+ moveTo(index, true)
+ }
+ },
}
props.ref?.(ref)
const keybinds = createMemo(() => props.keybind?.filter((x) => !x.disabled && x.keybind) ?? [])
+ const titleParts = createMemo(() => {
+ const parts = props.title.split("|")
+ return { main: parts[0], sub: parts[1] }
+ })
+
return (
- {props.title}
+ {titleParts().main}
+
+ {titleParts().sub}
+
esc
@@ -314,6 +331,8 @@ export function DialogSelect(props: DialogSelectProps) {
active={active()}
current={current()}
gutter={option.gutter}
+ titleColor={option.titleColor}
+ footerColor={option.footerColor}
/>
)
@@ -349,6 +368,8 @@ function Option(props: {
current?: boolean
footer?: JSX.Element | string
gutter?: JSX.Element
+ titleColor?: RGBA
+ footerColor?: RGBA
onMouseOver?: () => void
}) {
const { theme } = useTheme()
@@ -368,20 +389,20 @@ function Option(props: {
- {Locale.truncate(props.title, 61)}
+ {Locale.truncate(props.title, 60)}
{props.description}
- {props.footer}
+ {props.footer}
>
diff --git a/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx b/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx
index 57375ba09db..43ef362c8d6 100644
--- a/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx
+++ b/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx
@@ -8,7 +8,7 @@ import { useToast } from "./toast"
export function Dialog(
props: ParentProps<{
- size?: "medium" | "large"
+ size?: "medium" | "large" | "xlarge"
onClose: () => void
}>,
) {
@@ -26,7 +26,7 @@ export function Dialog(
height={dimensions().height}
alignItems="center"
position="absolute"
- paddingTop={dimensions().height / 4}
+ paddingTop={props.size === "xlarge" ? 2 : dimensions().height / 4}
left={0}
top={0}
backgroundColor={RGBA.fromInts(0, 0, 0, 150)}
@@ -36,7 +36,8 @@ export function Dialog(
if (renderer.getSelection()) return
e.stopPropagation()
}}
- width={props.size === "large" ? 80 : 60}
+ width={props.size === "xlarge" ? 120 : props.size === "large" ? 80 : 60}
+ height={props.size === "xlarge" ? dimensions().height - 4 : undefined}
maxWidth={dimensions().width - 2}
backgroundColor={theme.backgroundPanel}
paddingTop={1}
@@ -53,7 +54,7 @@ function init() {
element: JSX.Element
onClose?: () => void
}[],
- size: "medium" as "medium" | "large",
+ size: "medium" as "medium" | "large" | "xlarge",
})
useKeyboard((evt) => {
@@ -113,13 +114,26 @@ function init() {
},
])
},
+ push(input: JSX.Element, onClose?: () => void) {
+ if (store.stack.length === 0) {
+ focus = renderer.currentFocusedRenderable
+ focus?.blur()
+ }
+ setStore("stack", [
+ ...store.stack,
+ {
+ element: input,
+ onClose,
+ },
+ ])
+ },
get stack() {
return store.stack
},
get size() {
return store.size
},
- setSize(size: "medium" | "large") {
+ setSize(size: "medium" | "large" | "xlarge") {
setStore("size", size)
},
}
diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index ded416e66d9..b8ffe169e12 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -45,6 +45,35 @@ export namespace ProviderTransform {
model: Provider.Model,
options: Record<string, any>,
): ModelMessage[] {
+ // Strip OpenAI itemId / encrypted-reasoning metadata from prior messages (mirrors Codex behavior)
+ if (model.api.npm === "@ai-sdk/openai" || options.store === false) {
+ msgs = msgs.map((msg) => {
+ if (msg.providerOptions) {
+ for (const providerOpts of Object.values(msg.providerOptions)) {
+ if (providerOpts && typeof providerOpts === "object") {
+ delete providerOpts["itemId"]
+ delete providerOpts["reasoningEncryptedContent"]
+ }
+ }
+ }
+ if (!Array.isArray(msg.content)) {
+ return msg
+ }
+ const content = msg.content.map((part) => {
+ if (part.providerOptions) {
+ for (const providerOpts of Object.values(part.providerOptions)) {
+ if (providerOpts && typeof providerOpts === "object") {
+ delete providerOpts["itemId"]
+ delete providerOpts["reasoningEncryptedContent"]
+ }
+ }
+ }
+ return part
+ })
+ return { ...msg, content } as typeof msg
+ })
+ }
+
// Anthropic rejects messages with empty content - filter out empty string messages
// and remove empty text/reasoning parts from array content
if (model.api.npm === "@ai-sdk/anthropic") {
@@ -167,7 +196,20 @@ export namespace ProviderTransform {
return msgs
}
- function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
+ async function applyCaching(msgs: ModelMessage[], providerID: string, sessionID?: string): Promise<ModelMessage[]> {
+ // Skip caching if session cache was invalidated (e.g., message deletion)
+ if (sessionID) {
+ const { Session } = await import("../session")
+ const session = await Session.get(sessionID).catch(() => null)
+ if (session?.cacheInvalidated) {
+ // Clear flag and return without cache control markers
+ await Session.update(sessionID, (draft) => {
+ delete draft.cacheInvalidated
+ }).catch(() => {})
+ return msgs
+ }
+ }
+
const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
@@ -245,7 +287,12 @@ export namespace ProviderTransform {
})
}
- export function message(msgs: ModelMessage[], model: Provider.Model, options: Record) {
+ export async function message(
+ msgs: ModelMessage[],
+ model: Provider.Model,
+ options: Record<string, any> = {},
+ sessionID?: string,
+ ) {
msgs = unsupportedParts(msgs, model)
msgs = normalizeMessages(msgs, model, options)
if (
@@ -256,7 +303,7 @@ export namespace ProviderTransform {
model.id.includes("claude") ||
model.api.npm === "@ai-sdk/anthropic"
) {
- msgs = applyCaching(msgs, model.providerID)
+ msgs = await applyCaching(msgs, model.providerID, sessionID)
}
// Remap providerOptions keys from stored providerID to expected SDK key
diff --git a/packages/opencode/src/session/index.ts b/packages/opencode/src/session/index.ts
index 556fad01f59..96963a126e0 100644
--- a/packages/opencode/src/session/index.ts
+++ b/packages/opencode/src/session/index.ts
@@ -86,6 +86,7 @@ export namespace Session {
diff: z.string().optional(),
})
.optional(),
+ cacheInvalidated: z.boolean().optional(),
})
.meta({
ref: "Session",
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 4be6e2538f7..fe51c978d8b 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -248,7 +248,7 @@ export namespace LLM {
async transformParams(args) {
if (args.type === "stream") {
// @ts-expect-error
- args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options)
+ args.params.prompt = await ProviderTransform.message(args.params.prompt, input.model, options, input.sessionID)
}
return args.params
},
diff --git a/packages/opencode/src/util/token.ts b/packages/opencode/src/util/token.ts
index cee5adc3771..3958f8d5802 100644
--- a/packages/opencode/src/util/token.ts
+++ b/packages/opencode/src/util/token.ts
@@ -1,7 +1,13 @@
export namespace Token {
- const CHARS_PER_TOKEN = 4
+ const CHARS_PER_TOKEN = 4.0 // approximate average
export function estimate(input: string) {
return Math.max(0, Math.round((input || "").length / CHARS_PER_TOKEN))
}
+
+ export function estimateImage(urlOrData: string): number {
+ // Estimate tokens for image data/URLs since providers don't return image token counts
+ // Uses string length as proxy: data URLs contain base64 image data, file paths are small
+ return Math.max(100, Math.round(urlOrData.length / 170))
+ }
}
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts
index 8e28f1209e1..7e1b6c1cd6a 100644
--- a/packages/opencode/test/provider/transform.test.ts
+++ b/packages/opencode/test/provider/transform.test.ts
@@ -294,7 +294,7 @@ describe("ProviderTransform.schema - gemini array items", () => {
})
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
- test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
+ test("DeepSeek with tool calls includes reasoning_content in providerOptions", async () => {
const msgs = [
{
role: "assistant",
@@ -310,7 +310,7 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => {
},
] as any[]
- const result = ProviderTransform.message(
+ const result = await ProviderTransform.message(
msgs,
{
id: "deepseek/deepseek-chat",
@@ -361,7 +361,7 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => {
expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
})
- test("Non-DeepSeek providers leave reasoning content unchanged", () => {
+ test("Non-DeepSeek providers leave reasoning content unchanged", async () => {
const msgs = [
{
role: "assistant",
@@ -372,7 +372,7 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => {
},
] as any[]
- const result = ProviderTransform.message(
+ const result = await ProviderTransform.message(
msgs,
{
id: "openai/gpt-4",
@@ -450,7 +450,7 @@ describe("ProviderTransform.message - empty image handling", () => {
headers: {},
} as any
- test("should replace empty base64 image with error text", () => {
+ test("should replace empty base64 image with error text", async () => {
const msgs = [
{
role: "user",
@@ -461,7 +461,7 @@ describe("ProviderTransform.message - empty image handling", () => {
},
] as any[]
- const result = ProviderTransform.message(msgs, mockModel, {})
+ const result = await ProviderTransform.message(msgs, mockModel, {})
expect(result).toHaveLength(1)
expect(result[0].content).toHaveLength(2)
@@ -472,7 +472,7 @@ describe("ProviderTransform.message - empty image handling", () => {
})
})
- test("should keep valid base64 images unchanged", () => {
+ test("should keep valid base64 images unchanged", async () => {
const validBase64 =
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
const msgs = [
@@ -485,7 +485,7 @@ describe("ProviderTransform.message - empty image handling", () => {
},
] as any[]
- const result = ProviderTransform.message(msgs, mockModel, {})
+ const result = await ProviderTransform.message(msgs, mockModel, {})
expect(result).toHaveLength(1)
expect(result[0].content).toHaveLength(2)
@@ -493,7 +493,7 @@ describe("ProviderTransform.message - empty image handling", () => {
expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
})
- test("should handle mixed valid and empty images", () => {
+ test("should handle mixed valid and empty images", async () => {
const validBase64 =
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
const msgs = [
@@ -507,7 +507,7 @@ describe("ProviderTransform.message - empty image handling", () => {
},
] as any[]
- const result = ProviderTransform.message(msgs, mockModel, {})
+ const result = await ProviderTransform.message(msgs, mockModel, {})
expect(result).toHaveLength(1)
expect(result[0].content).toHaveLength(3)
@@ -553,21 +553,21 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
headers: {},
} as any
- test("filters out messages with empty string content", () => {
+ test("filters out messages with empty string content", async () => {
const msgs = [
{ role: "user", content: "Hello" },
{ role: "assistant", content: "" },
{ role: "user", content: "World" },
] as any[]
- const result = ProviderTransform.message(msgs, anthropicModel, {})
+ const result = await ProviderTransform.message(msgs, anthropicModel, {})
expect(result).toHaveLength(2)
expect(result[0].content).toBe("Hello")
expect(result[1].content).toBe("World")
})
- test("filters out empty text parts from array content", () => {
+ test("filters out empty text parts from array content", async () => {
const msgs = [
{
role: "assistant",
@@ -579,14 +579,14 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
},
] as any[]
- const result = ProviderTransform.message(msgs, anthropicModel, {})
+ const result = await ProviderTransform.message(msgs, anthropicModel, {})
expect(result).toHaveLength(1)
expect(result[0].content).toHaveLength(1)
expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
})
- test("filters out empty reasoning parts from array content", () => {
+ test("filters out empty reasoning parts from array content", async () => {
const msgs = [
{
role: "assistant",
@@ -598,14 +598,14 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
},
] as any[]
- const result = ProviderTransform.message(msgs, anthropicModel, {})
+ const result = await ProviderTransform.message(msgs, anthropicModel, {})
expect(result).toHaveLength(1)
expect(result[0].content).toHaveLength(1)
expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
})
- test("removes entire message when all parts are empty", () => {
+ test("removes entire message when all parts are empty", async () => {
const msgs = [
{ role: "user", content: "Hello" },
{
@@ -618,14 +618,14 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
{ role: "user", content: "World" },
] as any[]
- const result = ProviderTransform.message(msgs, anthropicModel, {})
+ const result = await ProviderTransform.message(msgs, anthropicModel, {})
expect(result).toHaveLength(2)
expect(result[0].content).toBe("Hello")
expect(result[1].content).toBe("World")
})
- test("keeps non-text/reasoning parts even if text parts are empty", () => {
+ test("keeps non-text/reasoning parts even if text parts are empty", async () => {
const msgs = [
{
role: "assistant",
@@ -636,7 +636,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
},
] as any[]
- const result = ProviderTransform.message(msgs, anthropicModel, {})
+ const result = await ProviderTransform.message(msgs, anthropicModel, {})
expect(result).toHaveLength(1)
expect(result[0].content).toHaveLength(1)
@@ -648,7 +648,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
})
})
- test("keeps messages with valid text alongside empty parts", () => {
+ test("keeps messages with valid text alongside empty parts", async () => {
const msgs = [
{
role: "assistant",
@@ -660,7 +660,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
},
] as any[]
- const result = ProviderTransform.message(msgs, anthropicModel, {})
+ const result = await ProviderTransform.message(msgs, anthropicModel, {})
expect(result).toHaveLength(1)
expect(result[0].content).toHaveLength(2)
@@ -668,7 +668,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
})
- test("does not filter for non-anthropic providers", () => {
+ test("does not filter for non-anthropic providers", async () => {
const openaiModel = {
...anthropicModel,
providerID: "openai",
@@ -687,7 +687,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
},
] as any[]
- const result = ProviderTransform.message(msgs, openaiModel, {})
+ const result = await ProviderTransform.message(msgs, openaiModel, {})
expect(result).toHaveLength(2)
expect(result[0].content).toBe("")
@@ -721,7 +721,7 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
headers: {},
} as any
- test("preserves itemId and reasoningEncryptedContent when store=false", () => {
+ test("strips itemId and reasoningEncryptedContent when store=false", async () => {
const msgs = [
{
role: "assistant",
@@ -749,14 +749,14 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
+ const result = (await ProviderTransform.message(msgs, openaiModel, { store: false })) as any[]
expect(result).toHaveLength(1)
- expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
- expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+ expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined()
})
- test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
+ test("strips itemId and reasoningEncryptedContent when store=false even when not openai", async () => {
const zenModel = {
...openaiModel,
providerID: "zen",
@@ -788,14 +788,14 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
+ const result = (await ProviderTransform.message(msgs, zenModel, { store: false })) as any[]
expect(result).toHaveLength(1)
- expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
- expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+ expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined()
})
- test("preserves other openai options including itemId", () => {
+ test("preserves other openai options when stripping itemId", async () => {
const msgs = [
{
role: "assistant",
@@ -814,13 +814,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
+ const result = (await ProviderTransform.message(msgs, openaiModel, { store: false })) as any[]
- expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
})
- test("preserves metadata for openai package when store is true", () => {
+ test("strips metadata for openai package even when store is true", async () => {
const msgs = [
{
role: "assistant",
@@ -838,13 +838,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- // openai package preserves itemId regardless of store value
- const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
+ // openai package always strips itemId regardless of store value
+ const result = (await ProviderTransform.message(msgs, openaiModel, { store: true })) as any[]
- expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
})
- test("preserves metadata for non-openai packages when store is false", () => {
+ test("strips metadata for non-openai packages when store is false", async () => {
const anthropicModel = {
...openaiModel,
providerID: "anthropic",
@@ -871,13 +871,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- // store=false preserves metadata for non-openai packages
- const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
+ // store=false triggers stripping even for non-openai packages
+ const result = (await ProviderTransform.message(msgs, anthropicModel, { store: false })) as any[]
- expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
})
- test("preserves metadata using providerID key when store is false", () => {
+ test("strips metadata using providerID key when store is false", async () => {
const opencodeModel = {
...openaiModel,
providerID: "opencode",
@@ -905,13 +905,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
+ const result = (await ProviderTransform.message(msgs, opencodeModel, { store: false })) as any[]
- expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
+ expect(result[0].content[0].providerOptions?.opencode?.itemId).toBeUndefined()
expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
})
- test("preserves itemId across all providerOptions keys", () => {
+ test("strips itemId across all providerOptions keys", async () => {
const opencodeModel = {
...openaiModel,
providerID: "opencode",
@@ -943,17 +943,17 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
+ const result = (await ProviderTransform.message(msgs, opencodeModel, { store: false })) as any[]
- expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
- expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
- expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
- expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
- expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
- expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
+ expect(result[0].providerOptions?.openai?.itemId).toBeUndefined()
+ expect(result[0].providerOptions?.opencode?.itemId).toBeUndefined()
+ expect(result[0].providerOptions?.extra?.itemId).toBeUndefined()
+ expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+ expect(result[0].content[0].providerOptions?.opencode?.itemId).toBeUndefined()
+ expect(result[0].content[0].providerOptions?.extra?.itemId).toBeUndefined()
})
- test("does not strip metadata for non-openai packages when store is not false", () => {
+ test("does not strip metadata for non-openai packages when store is not false", async () => {
const anthropicModel = {
...openaiModel,
providerID: "anthropic",
@@ -980,7 +980,7 @@ describe("ProviderTransform.message - strip openai metadata when store=false", (
},
] as any[]
- const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
+ const result = (await ProviderTransform.message(msgs, anthropicModel, {})) as any[]
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
})
@@ -1013,7 +1013,7 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
headers: {},
}) as any
- test("azure keeps 'azure' key and does not remap to 'openai'", () => {
+ test("azure keeps 'azure' key and does not remap to 'openai'", async () => {
const model = createModel("azure", "@ai-sdk/azure")
const msgs = [
{
@@ -1025,13 +1025,13 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
},
] as any[]
- const result = ProviderTransform.message(msgs, model, {})
+ const result = await ProviderTransform.message(msgs, model, {})
expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
expect(result[0].providerOptions?.openai).toBeUndefined()
})
- test("copilot remaps providerID to 'copilot' key", () => {
+ test("copilot remaps providerID to 'copilot' key", async () => {
const model = createModel("github-copilot", "@ai-sdk/github-copilot")
const msgs = [
{
@@ -1043,13 +1043,13 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
},
] as any[]
- const result = ProviderTransform.message(msgs, model, {})
+ const result = await ProviderTransform.message(msgs, model, {})
expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
})
- test("bedrock remaps providerID to 'bedrock' key", () => {
+ test("bedrock remaps providerID to 'bedrock' key", async () => {
const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
const msgs = [
{
@@ -1061,7 +1061,7 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
},
] as any[]
- const result = ProviderTransform.message(msgs, model, {})
+ const result = await ProviderTransform.message(msgs, model, {})
expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
@@ -1069,7 +1069,7 @@ describe("ProviderTransform.message - providerOptions key remapping", () => {
})
describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
- test("adds cachePoint", () => {
+ test("adds cachePoint", async () => {
const model = {
id: "amazon-bedrock/custom-claude-sonnet-4.5",
providerID: "amazon-bedrock",
@@ -1091,7 +1091,7 @@ describe("ProviderTransform.message - claude w/bedrock custom inference profile"
},
] as any[]
- const result = ProviderTransform.message(msgs, model, {})
+ const result = await ProviderTransform.message(msgs, model, {})
expect(result[0].providerOptions?.bedrock).toEqual(
expect.objectContaining({
diff --git a/packages/sdk/js/src/v2/gen/types.gen.ts b/packages/sdk/js/src/v2/gen/types.gen.ts
index 0556e1ad945..34e0d7dd3d0 100644
--- a/packages/sdk/js/src/v2/gen/types.gen.ts
+++ b/packages/sdk/js/src/v2/gen/types.gen.ts
@@ -783,6 +783,7 @@ export type Session = {
snapshot?: string
diff?: string
}
+ cacheInvalidated?: boolean
}
export type EventSessionCreated = {
diff --git a/packages/ui/src/components/message-part.tsx b/packages/ui/src/components/message-part.tsx
index b8a7ce0b505..53b4435052c 100644
--- a/packages/ui/src/components/message-part.tsx
+++ b/packages/ui/src/components/message-part.tsx
@@ -92,6 +92,7 @@ function DiagnosticsDisplay(props: { diagnostics: Diagnostic[] }): JSX.Element {
export interface MessageProps {
message: MessageType
parts: PartType[]
+ id?: string
}
export interface MessagePartProps {
@@ -277,18 +278,18 @@ export function Message(props: MessageProps) {
return (
- {(userMessage) => }
+ {(userMessage) => }
{(assistantMessage) => (
-
+
)}
)
}
-export function AssistantMessageDisplay(props: { message: AssistantMessage; parts: PartType[] }) {
+export function AssistantMessageDisplay(props: { message: AssistantMessage; parts: PartType[]; id?: string }) {
const emptyParts: PartType[] = []
const filteredParts = createMemo(
() =>
@@ -298,10 +299,16 @@ export function AssistantMessageDisplay(props: { message: AssistantMessage; part
emptyParts,
{ equals: same },
)
- return {(part) => }
+ return (
+
+ )
}
-export function UserMessageDisplay(props: { message: UserMessage; parts: PartType[] }) {
+export function UserMessageDisplay(props: { message: UserMessage; parts: PartType[]; id?: string }) {
const dialog = useDialog()
const i18n = useI18n()
const [copied, setCopied] = createSignal(false)
@@ -370,7 +377,7 @@ export function UserMessageDisplay(props: { message: UserMessage; parts: PartTyp
}
return (
-
+
0}>
diff --git a/packages/ui/src/components/session-turn.tsx b/packages/ui/src/components/session-turn.tsx
index 48d6337edba..2fce28a8d05 100644
--- a/packages/ui/src/components/session-turn.tsx
+++ b/packages/ui/src/components/session-turn.tsx
@@ -93,6 +93,7 @@ function AssistantMessageItem(props: {
responsePartId: string | undefined
hideResponsePart: boolean
hideReasoning: boolean
+ anchorId?: string
}) {
const data = useData()
const emptyParts: PartType[] = []
@@ -122,7 +123,7 @@ function AssistantMessageItem(props: {
return parts.filter((part) => part?.id !== responsePartId)
})
- return
+ return
}
export function SessionTurn(
@@ -673,6 +674,7 @@ export function SessionTurn(
responsePartId={responsePartId()}
hideResponsePart={hideResponsePart()}
hideReasoning={!working()}
+ anchorId={`message-${assistantMessage.id}`}
/>
)}
From f5760fdef0dfe760c1885fb2d76c7003243d1297 Mon Sep 17 00:00:00 2001
From: ops
Date: Mon, 2 Feb 2026 05:59:34 +0100
Subject: [PATCH 2/3] fix: exclude estimates from token sums
---
.../cli/cmd/tui/routes/session/dialog-inspect.tsx | 6 ++++--
.../cmd/tui/routes/session/dialog-timeline.tsx | 14 ++++++--------
.../src/cli/cmd/tui/routes/session/index.tsx | 15 +++------------
3 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx
index 7b8031a156f..633a8e5917d 100644
--- a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-inspect.tsx
@@ -95,7 +95,9 @@ export function DialogInspect(props: DialogInspectProps) {
}
: null
- const tokenTotal = tokenFields ? [...tokenFields.line1, ...tokenFields.line2].reduce((s, f) => s + (f.v || 0), 0) : 0
+ const tokenTotal = tokenFields
+ ? [...tokenFields.line1.filter((f) => !f.estimated), ...tokenFields.line2].reduce((s, f) => s + (f.v || 0), 0)
+ : 0
const renderPart = (part: Part) => {
if (part.type === "text")
@@ -179,7 +181,7 @@ export function DialogInspect(props: DialogInspectProps) {
- Total: ~{tokenTotal.toLocaleString()} tokens
+ Total: {tokenTotal.toLocaleString()} tokens
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx
index b164c218ae3..863279b8a65 100644
--- a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx
@@ -147,13 +147,12 @@ export function DialogTimeline(props: {
// Get the token count for this specific message (delta only, not cumulative)
const messageTokens = getMessageTokens(message, parts, isCompactionSummary)
- // Add tool estimation for assistant messages
+ // Calculate tool estimate for debug display only (not added to total)
const toolEstimate = message.role === "assistant" ? getToolOutputEstimate(parts) : 0
- const delta = messageTokens + toolEstimate
+ const delta = messageTokens
- // Format with ~ included in padding if needed
- const hasEstimate = toolEstimate > 0
- const formatted = hasEstimate ? ("~" + delta.toString()).padStart(8) : formatTokenCount(delta)
+ // Format token count (no ~ prefix, pure API data)
+ const formatted = formatTokenCount(delta)
// Token count color based on thresholds (cold to hot gradient)
// Using delta for color coding
@@ -193,9 +192,8 @@ export function DialogTimeline(props: {
const prefix = isCompactionSummary ? "[compaction] " : message.role === "assistant" ? "agent: " : ""
const title = tokenDebug + prefix + summary
- // Add ~ prefix for user messages (estimates only), keeping same width
- const isUser = message.role === "user"
- const tokenDisplay = isUser ? ("~" + delta.toString()).padStart(8) : formatted
+ // Pure API data display (no ~ prefix for user messages)
+ const tokenDisplay = formatTokenCount(delta)
const gutter = [{tokenDisplay}]
// Normal assistant messages use textMuted for title
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
index 5e10df54d8b..fb944c706e6 100644
--- a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
@@ -1314,14 +1314,8 @@ function AssistantMessage(props: { message: AssistantMessage; parts: Part[]; las
const tokens = (part as any).tokens
return sum + tokens.input + tokens.output + (tokens.reasoning || 0)
}, 0)
- const tools = parts.reduce((sum, part) => {
- if (part.type !== "tool") return sum
- const state = (part as ToolPart).state as any
- if (!state?.output) return sum
- const output = typeof state.output === "string" ? state.output : JSON.stringify(state.output)
- return sum + Token.estimate(output)
- }, 0)
- return base + tools
+ // Pure API data - don't add tool estimate
+ return base
})
const cacheRead = createMemo(() => liveMessage()?.tokens?.cache?.read ?? 0)
@@ -1394,10 +1388,7 @@ function AssistantMessage(props: { message: AssistantMessage; parts: Part[]; las
}
>
-
- {props.message.finish === "tool-calls" ? "~" : ""}
- {tokenTotal().toLocaleString()}
-
+ {tokenTotal().toLocaleString()}
({cacheRead().toLocaleString()})
From 631dcea986dd521557d156a5645cb2de4b8bf994 Mon Sep 17 00:00:00 2001
From: ops
Date: Mon, 2 Feb 2026 06:02:45 +0100
Subject: [PATCH 3/3] fix: set token estimate average to 3.5 chars/token
---
packages/opencode/src/util/token.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/packages/opencode/src/util/token.ts b/packages/opencode/src/util/token.ts
index 3958f8d5802..998185e5afa 100644
--- a/packages/opencode/src/util/token.ts
+++ b/packages/opencode/src/util/token.ts
@@ -1,5 +1,5 @@
export namespace Token {
- const CHARS_PER_TOKEN = 4.0 // approximate average
+ const CHARS_PER_TOKEN = 3.5 // approximate average
export function estimate(input: string) {
return Math.max(0, Math.round((input || "").length / CHARS_PER_TOKEN))