
Commit 56102ff

fix(core): detect vLLM context overflow errors (anomalyco#17763)
Co-authored-by: Aiden Cline <[email protected]>
1 parent 1b86c27 commit 56102ff

1 file changed

packages/opencode/src/provider/error.ts

Lines changed: 3 additions & 1 deletion
```diff
@@ -13,14 +13,16 @@ export namespace ProviderError {
     /input token count.*exceeds the maximum/i, // Google (Gemini)
     /maximum prompt length is \d+/i, // xAI (Grok)
     /reduce the length of the messages/i, // Groq
-    /maximum context length is \d+ tokens/i, // OpenRouter, DeepSeek
+    /maximum context length is \d+ tokens/i, // OpenRouter, DeepSeek, vLLM
     /exceeds the limit of \d+/i, // GitHub Copilot
     /exceeds the available context size/i, // llama.cpp server
     /greater than the context length/i, // LM Studio
     /context window exceeds limit/i, // MiniMax
     /exceeded model token limit/i, // Kimi For Coding, Moonshot
     /context[_ ]length[_ ]exceeded/i, // Generic fallback
     /request entity too large/i, // HTTP 413
+    /context length is only \d+ tokens/i, // vLLM
+    /input length.*exceeds.*context length/i, // vLLM
   ]

   function isOpenAiErrorRetryable(e: APICallError) {
```
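These regexes are matched against raw provider error messages to recognize context-window overflows, since each backend words the failure differently. A minimal TypeScript sketch of how such a pattern list can be applied (the helper name `isContextOverflowError`, the trimmed pattern list, and the sample vLLM-style messages are illustrative assumptions, not code taken from the repository):

```typescript
// Illustrative sketch only: checks an error message against a subset of the
// overflow patterns from the diff above. Helper name and sample messages are
// assumptions, not the actual opencode API.
const CONTEXT_OVERFLOW_PATTERNS: RegExp[] = [
  /maximum context length is \d+ tokens/i, // OpenRouter, DeepSeek, vLLM
  /context length is only \d+ tokens/i, // vLLM
  /input length.*exceeds.*context length/i, // vLLM
  /context[_ ]length[_ ]exceeded/i, // Generic fallback
]

function isContextOverflowError(message: string): boolean {
  // True if any known overflow phrasing appears in the provider's message.
  return CONTEXT_OVERFLOW_PATTERNS.some((pattern) => pattern.test(message))
}

// Hypothetical vLLM-style messages the new patterns are meant to catch.
const samples = [
  "This model's maximum context length is 32768 tokens.",
  "The prompt is too long: the context length is only 8192 tokens.",
  "Input length 40000 exceeds the model's context length of 32768.",
]

for (const msg of samples) {
  console.log(isContextOverflowError(msg)) // prints true for each sample
}
```

The second and third sample phrasings are the kind the existing OpenRouter/DeepSeek pattern would miss, which is why the commit adds the two dedicated vLLM regexes.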
