Skip to content

Commit 3741101

Browse files
committed
fix(provider): gate zai/zhipuai thinking injection on reasoning capability and restore GLM variants
- Only inject `thinking: { type: "enabled", clear_thinking: false }` for z.ai/zhipuai models that have `capabilities.reasoning = true`. Previously this was sent unconditionally, causing non-reasoning GLM models (e.g. glm-5-turbo, glm-4.5-flash) to return empty responses silently.
- Remove `id.includes("glm")` from the early-return exclusion block in `variants()`. GLM reasoning models routed through `@ai-sdk/openai-compatible` now fall through to the correct switch case and return `{ low, medium, high }` reasoning-effort variants.
- Update tests: the GLM variant test now asserts that reasoning efforts are returned; add a non-reasoning z.ai model test to confirm thinking is not injected.
1 parent cfbbae7 commit 3741101

2 files changed

Lines changed: 33 additions & 8 deletions

File tree

packages/opencode/src/provider/transform.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -407,7 +407,6 @@ export function variants(model: Provider.Model): Record<string, Record<string, a
407407
if (
408408
id.includes("deepseek") ||
409409
id.includes("minimax") ||
410-
id.includes("glm") ||
411410
id.includes("mistral") ||
412411
id.includes("kimi") ||
413412
id.includes("k2p5") ||
@@ -828,7 +827,8 @@ export function options(input: {
828827

829828
if (
830829
["zai", "zhipuai"].some((id) => input.model.providerID.includes(id)) &&
831-
input.model.api.npm === "@ai-sdk/openai-compatible"
830+
input.model.api.npm === "@ai-sdk/openai-compatible" &&
831+
input.model.capabilities.reasoning
832832
) {
833833
result["thinking"] = {
834834
type: "enabled",

packages/opencode/test/provider/transform.test.ts

Lines changed: 31 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,27 @@ describe("ProviderTransform.options - zai/zhipuai thinking", () => {
169169
clear_thinking: false,
170170
})
171171
})
172+
173+
test(`${providerID} should NOT set thinking cfg for non-reasoning model`, () => {
174+
const result = ProviderTransform.options({
175+
model: {
176+
...createModel(providerID),
177+
capabilities: {
178+
temperature: true,
179+
reasoning: false,
180+
attachment: true,
181+
toolcall: true,
182+
input: { text: true, audio: false, image: false, video: false, pdf: false },
183+
output: { text: true, audio: false, image: false, video: false, pdf: false },
184+
interleaved: false,
185+
},
186+
},
187+
sessionID,
188+
providerOptions: {},
189+
})
190+
191+
expect(result.thinking).toBeUndefined()
192+
})
172193
}
173194
})
174195

@@ -2099,18 +2120,22 @@ describe("ProviderTransform.variants", () => {
20992120
expect(result).toEqual({})
21002121
})
21012122

2102-
test("glm returns empty object", () => {
2123+
test("glm via openai-compatible returns reasoning efforts", () => {
21032124
const model = createMockModel({
2104-
id: "glm/glm-4",
2105-
providerID: "glm",
2125+
id: "zai-coding-plan/glm-4.7",
2126+
providerID: "zai-coding-plan",
21062127
api: {
2107-
id: "glm-4",
2108-
url: "https://api.glm.com",
2128+
id: "glm-4.7",
2129+
url: "https://open.bigmodel.cn/api/paas/v4",
21092130
npm: "@ai-sdk/openai-compatible",
21102131
},
21112132
})
21122133
const result = ProviderTransform.variants(model)
2113-
expect(result).toEqual({})
2134+
expect(result).toEqual({
2135+
low: { reasoningEffort: "low" },
2136+
medium: { reasoningEffort: "medium" },
2137+
high: { reasoningEffort: "high" },
2138+
})
21142139
})
21152140

21162141
test("mistral returns empty object", () => {

0 commit comments

Comments (0)