From bd09165034befe5d088cfd716f738684702721e6 Mon Sep 17 00:00:00 2001 From: Ammar Date: Thu, 9 Apr 2026 12:01:24 -0500 Subject: [PATCH] =?UTF-8?q?=F0=9F=A4=96=20fix:=20strip=20unsupported=20tru?= =?UTF-8?q?ncation=20from=20Codex=20OAuth=20requests?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ChatGPT Codex Responses endpoint rejects the OpenAI `truncation` parameter, so Codex OAuth requests must strip it even when the public Responses API would accept it. Restore that behavior in the request normalizer and lock it in with regression coverage. --- _Generated with `mux` • Model: `openai:gpt-5.4` • Thinking: `xhigh` • Cost: `$1.79`_ --- .../services/providerModelFactory.test.ts | 18 +++++++++++------ src/node/services/providerModelFactory.ts | 20 +++++++++---------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/src/node/services/providerModelFactory.test.ts b/src/node/services/providerModelFactory.test.ts index 73d3c81f93..e9eeef74e3 100644 --- a/src/node/services/providerModelFactory.test.ts +++ b/src/node/services/providerModelFactory.test.ts @@ -37,7 +37,7 @@ async function withTempConfig( } describe("normalizeCodexResponsesBody", () => { - it("enforces Codex-compatible fields and lifts system prompts into instructions", () => { + it("enforces Codex-compatible fields, strips truncation, and lifts system prompts into instructions", () => { const normalized = JSON.parse( normalizeCodexResponsesBody( JSON.stringify({ @@ -65,11 +65,11 @@ describe("normalizeCodexResponsesBody", () => { store: boolean; temperature: number; text: unknown; - truncation: string; + truncation?: unknown; }; expect(normalized.store).toBe(false); - expect(normalized.truncation).toBe("disabled"); + expect(normalized.truncation).toBeUndefined(); expect(normalized.temperature).toBe(0.2); expect(normalized.text).toEqual({ format: { type: "json_schema", name: "result" } }); expect(normalized.metadata).toBeUndefined(); @@ -77,7 
+77,7 @@ describe("normalizeCodexResponsesBody", () => { expect(normalized.input).toEqual([{ role: "user", content: "Ship the fix." }]); }); - it("preserves explicit auto truncation", () => { + it("strips explicit truncation because the Codex endpoint rejects it", () => { const normalized = JSON.parse( normalizeCodexResponsesBody( JSON.stringify({ @@ -86,9 +86,9 @@ describe("normalizeCodexResponsesBody", () => { truncation: "auto", }) ) - ) as { truncation: string; store: boolean }; + ) as { truncation?: unknown; store: boolean }; - expect(normalized.truncation).toBe("auto"); + expect(normalized.truncation).toBeUndefined(); expect(normalized.store).toBe(false); }); }); @@ -397,6 +397,12 @@ describe("ProviderModelFactory GitHub Copilot", () => { expect(requests).toHaveLength(1); expect(requests[0]?.input).toBe(CODEX_ENDPOINT); expect(requests[0]?.init?.body).toBe(normalizeCodexResponsesBody(originalBody)); + const normalizedBody = JSON.parse( + (requests[0]?.init?.body as string | undefined) ?? 
"{}" + ) as { + truncation?: unknown; + }; + expect(normalizedBody.truncation).toBeUndefined(); const headers = new Headers(requests[0]?.init?.headers); expect(headers.get("authorization")).toBe("Bearer test-access-token"); diff --git a/src/node/services/providerModelFactory.ts b/src/node/services/providerModelFactory.ts index b7f42cc38f..4e69937418 100644 --- a/src/node/services/providerModelFactory.ts +++ b/src/node/services/providerModelFactory.ts @@ -597,7 +597,6 @@ const CODEX_ALLOWED_PARAMS = new Set([ "top_p", "include", "text", // structured output via Output.object -> text.format - "truncation", ]); // --------------------------------------------------------------------------- @@ -628,10 +627,10 @@ function extractTextContent(content: unknown): string { export function normalizeCodexResponsesBody(body: string): string { const json = JSON.parse(body) as Record<string, unknown>; - const truncation = json.truncation; - if (truncation !== "auto" && truncation !== "disabled") { - json.truncation = "disabled"; - } + + // ChatGPT's Codex endpoint is stricter than the public OpenAI Responses API + // and currently rejects the `truncation` field entirely. + delete json.truncation; // Codex-compatible Responses requests must disable storage and strip unsupported params. json.store = false; @@ -1116,9 +1115,9 @@ export class ProviderModelFactory { const baseFetch = getProviderFetch(providerConfig); const codexOauthService = this.codexOauthService; - // Wrap fetch to default truncation to "disabled" for OpenAI Responses API calls. - // This preserves our compaction handling while still allowing explicit truncation (e.g., auto). - const fetchWithOpenAITruncation = Object.assign( + // Wrap fetch so Codex OAuth Responses requests are normalized before + // they are rerouted from api.openai.com to chatgpt.com's Codex backend. 
+ const fetchWithOpenAICodexNormalization = Object.assign( async ( input: Parameters<typeof fetch>[0], init?: Parameters<typeof fetch>[1] @@ -1149,7 +1148,8 @@ export class ProviderModelFactory { const body = init?.body; // Only parse the JSON body when routing through Codex OAuth, since Codex - // requires instruction lifting, store=false, and Responses truncation. + // requires instruction lifting, store=false, and stripping unsupported + // Responses fields like `truncation`. if ( shouldRouteThroughCodexOauth && isOpenAIResponses && @@ -1212,7 +1212,7 @@ export class ProviderModelFactory { // Lazy-load OpenAI provider to reduce startup time const { createOpenAI } = await PROVIDER_REGISTRY.openai(); - const providerFetch = fetchWithOpenAITruncation as typeof fetch; + const providerFetch = fetchWithOpenAICodexNormalization as typeof fetch; const provider = createOpenAI({ ...configWithCreds, // Cast is safe: our fetch implementation is compatible with the SDK's fetch type.