diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index 0ebd8bbf59fe..7e306bd2d8b9 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -50,27 +50,44 @@ function normalizeMessages(
   model: Provider.Model,
   _options: Record,
 ): ModelMessage[] {
-  // Anthropic rejects messages with empty content - filter out empty string messages
-  // and remove empty text/reasoning parts from array content
-  if (model.api.npm === "@ai-sdk/anthropic" || model.api.npm === "@ai-sdk/amazon-bedrock") {
-    msgs = msgs
-      .map((msg) => {
-        if (typeof msg.content === "string") {
-          if (msg.content === "") return undefined
-          return msg
+  const modelID = `${model.id} ${model.api.id}`.toLowerCase()
+  const preserveAdaptiveAnthropicReasoning = !!anthropicAdaptiveEfforts(modelID)
+
+  // Many providers (Anthropic, Bedrock, and proxies like openai-compatible
+  // forwarding to Bedrock) reject messages with empty text content blocks.
+  // Filter them out universally - empty text blocks are never useful.
+  msgs = msgs
+    .map((msg) => {
+      // Anthropic adaptive thinking signs assistant reasoning blocks positionally.
+      // Preserve these messages verbatim, including whitespace-only text separators.
+      const preserveAssistantReasoning =
+        preserveAdaptiveAnthropicReasoning &&
+        msg.role === "assistant" &&
+        Array.isArray(msg.content) &&
+        msg.content.some((part) => part.type === "reasoning")
+
+      if (preserveAssistantReasoning) return msg
+
+      if (typeof msg.content === "string") {
+        if (!msg.content.trim()) return undefined
+        return msg
+      }
+      if (!Array.isArray(msg.content)) return msg
+      const filtered = msg.content.filter((part) => {
+        if (part.type === "text" || part.type === "reasoning") {
+          if (typeof part.text !== "string") return false
+          return part.text.trim().length > 0
         }
-        if (!Array.isArray(msg.content)) return msg
-        const filtered = msg.content.filter((part) => {
-          if (part.type === "text" || part.type === "reasoning") {
-            return part.text !== ""
-          }
-          return true
-        })
-        if (filtered.length === 0) return undefined
-        return { ...msg, content: filtered }
+        return true
       })
-      .filter((msg): msg is ModelMessage => msg !== undefined && msg.content !== "")
-  }
+      if (filtered.length === 0) return undefined
+      return { ...msg, content: filtered }
+    })
+    .filter((msg): msg is ModelMessage => {
+      if (!msg) return false
+      if (typeof msg.content !== "string") return true
+      return msg.content.trim().length > 0
+    })

   if (model.api.id.includes("claude")) {
     const scrub = (id: string) => id.replace(/[^a-zA-Z0-9_-]/g, "_")
@@ -390,10 +407,12 @@ const WIDELY_SUPPORTED_EFFORTS = ["low", "medium", "high"]
 const OPENAI_EFFORTS = ["none", "minimal", ...WIDELY_SUPPORTED_EFFORTS, "xhigh"]

 function anthropicAdaptiveEfforts(apiId: string): string[] | null {
-  if (["opus-4-7", "opus-4.7"].some((v) => apiId.includes(v))) {
+  const normalized = apiId.toLowerCase()
+
+  if (["opus-4-7", "opus-4.7"].some((v) => normalized.includes(v))) {
     return ["low", "medium", "high", "xhigh", "max"]
   }
-  if (["opus-4-6", "opus-4.6", "sonnet-4-6", "sonnet-4.6"].some((v) => apiId.includes(v))) {
+  if (["opus-4-6", "opus-4.6", "sonnet-4-6", "sonnet-4.6"].some((v) => normalized.includes(v))) {
     return ["low", "medium", "high", "max"]
   }
   return null
diff --git a/packages/opencode/test/cli/tui/plugin-loader.test.ts b/packages/opencode/test/cli/tui/plugin-loader.test.ts
index f5b04ff434f5..387f0b0a80cc 100644
--- a/packages/opencode/test/cli/tui/plugin-loader.test.ts
+++ b/packages/opencode/test/cli/tui/plugin-loader.test.ts
@@ -38,6 +38,7 @@ async function load(): Promise {
   const backup = await Bun.file(globalConfigPath)
     .text()
     .catch(() => undefined)
+  const backupPluginMetaFile = process.env.OPENCODE_PLUGIN_META_FILE

   await using tmp = await tmpdir({
     init: async (dir) => {
@@ -296,6 +297,7 @@ export default {

   return {
     localThemeFile,
+    localThemePath,
     invalidThemeFile,
     globalThemeFile,
     preloadedThemeFile,
@@ -324,6 +326,7 @@ export default {
   })
   const cwd = spyOn(process, "cwd").mockImplementation(() => tmp.path)
   const wait = spyOn(TuiConfig, "waitForDependencies").mockResolvedValue()
+  process.env.OPENCODE_PLUGIN_META_FILE = path.join(tmp.path, "plugin-meta.json")

   try {
     expect(addTheme(tmp.extra.preloadedThemeName, { theme: { primary: "#303030" } })).toBe(true)
@@ -331,7 +334,7 @@ export default {
     const localOpts = {
       fn_marker: tmp.extra.fnMarker,
       marker: tmp.extra.localMarker,
-      source: path.join(tmp.path, tmp.extra.localThemeFile),
+      source: tmp.extra.localThemePath,
       dest: tmp.extra.localDest,
       theme_path: `./${tmp.extra.localThemeFile}`,
       theme_name: tmp.extra.localThemeName,
@@ -453,6 +456,11 @@ export default {
     await TuiPluginRuntime.dispose()
     cwd.mockRestore()
     wait.mockRestore()
+    if (backupPluginMetaFile === undefined) {
+      delete process.env.OPENCODE_PLUGIN_META_FILE
+    } else {
+      process.env.OPENCODE_PLUGIN_META_FILE = backupPluginMetaFile
+    }
     if (backup === undefined) {
       await fs.rm(globalConfigPath, { force: true })
     } else {
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts
index e195d9b17720..701d8514fa40 100644
--- a/packages/opencode/test/provider/transform.test.ts
+++ b/packages/opencode/test/provider/transform.test.ts
@@ -1230,6 +1230,42 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
     expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
   })

+  test("preserves whitespace text separators in assistant reasoning messages", () => {
+    const adaptiveAnthropicModel = {
+      ...anthropicModel,
+      id: "anthropic/claude-opus-4-7",
+      api: {
+        ...anthropicModel.api,
+        id: "claude-opus-4-7-20260401",
+      },
+      capabilities: {
+        ...anthropicModel.capabilities,
+        reasoning: true,
+      },
+    }
+
+    const msgs = [
+      {
+        role: "assistant",
+        content: [
+          { type: "reasoning", text: "Thinking step 1" },
+          { type: "text", text: " " },
+          { type: "reasoning", text: "Thinking step 2" },
+          { type: "text", text: "Result" },
+        ],
+      },
+    ] as any[]
+
+    const result = ProviderTransform.message(msgs, adaptiveAnthropicModel, {})
+
+    expect(result).toHaveLength(1)
+    expect(result[0].content).toHaveLength(4)
+    expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking step 1" })
+    expect(result[0].content[1]).toEqual({ type: "text", text: " " })
+    expect(result[0].content[2]).toEqual({ type: "reasoning", text: "Thinking step 2" })
+    expect(result[0].content[3]).toEqual({ type: "text", text: "Result" })
+  })
+
   test("filters empty content for bedrock provider", () => {
     const bedrockModel = {
       ...anthropicModel,
@@ -1262,30 +1298,43 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
     expect(result[1].content[0]).toEqual({ type: "text", text: "Answer" })
   })

-  test("does not filter for non-anthropic providers", () => {
-    const openaiModel = {
+  test("filters empty content for all providers including openai-compatible", () => {
+    const model = {
       ...anthropicModel,
-      providerID: "openai",
+      providerID: "ducc",
       api: {
-        id: "gpt-4",
-        url: "https://api.openai.com",
-        npm: "@ai-sdk/openai",
+        id: "ducc/claude-sonnet-4-6",
+        url: "https://example.com/v1/",
+        npm: "@ai-sdk/openai-compatible",
       },
     }

     const msgs = [
       { role: "assistant", content: "" },
+      { role: "assistant", content: " " },
       {
         role: "assistant",
         content: [{ type: "text", text: "" }],
       },
+      {
+        role: "assistant",
+        content: [{ type: "text", text: " " }],
+      },
+      {
+        role: "user",
+        content: [
+          { type: "text", text: "" },
+          { type: "text", text: "hello" },
+        ],
+      },
     ] as any[]

-    const result = ProviderTransform.message(msgs, openaiModel, {})
+    const result = ProviderTransform.message(msgs, model, {})

-    expect(result).toHaveLength(2)
-    expect(result[0].content).toBe("")
-    expect(result[1].content).toHaveLength(1)
+    expect(result).toHaveLength(1)
+    expect(result[0].role).toBe("user")
+    expect(result[0].content).toHaveLength(1)
+    expect(result[0].content[0]).toMatchObject({ type: "text", text: "hello" })
   })

   test("splits anthropic assistant messages when text trails tool calls", () => {
diff --git a/packages/opencode/test/session/message-v2.test.ts b/packages/opencode/test/session/message-v2.test.ts
index 6d4e994a8791..a94a61549475 100644
--- a/packages/opencode/test/session/message-v2.test.ts
+++ b/packages/opencode/test/session/message-v2.test.ts
@@ -719,6 +719,67 @@ describe("session.message-v2.toModelMessage", () => {
     ])
   })

+  test("preserves whitespace text parts between assistant reasoning blocks", async () => {
+    const assistantID = "m-assistant-reasoning"
+    const adaptiveModel: Provider.Model = {
+      ...model,
+      id: ModelID.make("anthropic/claude-opus-4-7"),
+      providerID: ProviderID.make("anthropic"),
+      api: {
+        id: "claude-opus-4-7-20260401",
+        url: "https://api.anthropic.com",
+        npm: "@ai-sdk/anthropic",
+      },
+      capabilities: {
+        ...model.capabilities,
+        reasoning: true,
+      },
+    }
+
+    const input: MessageV2.WithParts[] = [
+      {
+        info: assistantInfo(assistantID, "m-parent", undefined, {
+          providerID: adaptiveModel.providerID,
+          modelID: adaptiveModel.api.id,
+        }),
+        parts: [
+          {
+            ...basePart(assistantID, "r1"),
+            type: "reasoning",
+            text: "thinking step 1",
+            time: { start: 0 },
+          },
+          {
+            ...basePart(assistantID, "t1"),
+            type: "text",
+            text: " ",
+          },
+          {
+            ...basePart(assistantID, "r2"),
+            type: "reasoning",
+            text: "thinking step 2",
+            time: { start: 1 },
+          },
+          {
+            ...basePart(assistantID, "t2"),
+            type: "text",
+            text: "final answer",
+          },
+        ] as MessageV2.Part[],
+      },
+    ]
+
+    const result = await MessageV2.toModelMessages(input, adaptiveModel)
+
+    expect(result).toHaveLength(1)
+    expect(result[0].role).toBe("assistant")
+    expect(result[0].content).toHaveLength(4)
+    expect(result[0].content[0]).toMatchObject({ type: "reasoning", text: "thinking step 1" })
+    expect(result[0].content[1]).toMatchObject({ type: "text", text: " " })
+    expect(result[0].content[2]).toMatchObject({ type: "reasoning", text: "thinking step 2" })
+    expect(result[0].content[3]).toMatchObject({ type: "text", text: "final answer" })
+  })
+
   test("splits assistant messages on step-start boundaries", async () => {
     const assistantID = "m-assistant"