Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion packages/opencode/src/session/compaction.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import { Effect, Layer, ServiceMap } from "effect"
import { makeRuntime } from "@/effect/run-service"
import { InstanceState } from "@/effect/instance-state"
import { isOverflow as overflow } from "./overflow"
import { Evidence } from "./evidence"

export namespace SessionCompaction {
const log = Log.create({ service: "session.compaction" })
Expand Down Expand Up @@ -130,7 +131,10 @@ export namespace SessionCompaction {
if (pruned > PRUNE_MINIMUM) {
for (const part of toPrune) {
if (part.state.status === "completed") {
part.state.time.compacted = Date.now()
const state = part.state
const evidence = Evidence.tool({ tool: part.tool, state })
state.time.compacted = Date.now()
state.metadata = { ...state.metadata, evidence }
yield* session.updatePart(part)
}
}
Expand Down
90 changes: 90 additions & 0 deletions packages/opencode/src/session/evidence.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
import { Hash } from "@/util/hash"
import { Locale } from "@/util/locale"
import type { MessageV2 } from "./message-v2"

/**
 * Compact "evidence" digests for tool results pruned during session
 * compaction. Instead of discarding a completed tool output entirely, we keep
 * a small, persistable summary — truncated input, an output excerpt, a short
 * content hash, and size stats — that can be rendered back into the prompt.
 */
export namespace Evidence {
  // Truncation budgets for the stored digest.
  const INPUT_MAX = 240
  const OUTPUT_MAX = 600
  const OUTPUT_LINES = 12
  const HASH_MAX = 12
  const FILE_MAX = 3

  /** Digest of one completed tool call, small enough to keep in part metadata. */
  export interface Tool {
    tool: string
    title: string
    input: string
    excerpt: string
    hash: string
    bytes: number
    lines: number
    path?: string
    files?: string[]
  }

  /** First OUTPUT_LINES lines of the output, capped at OUTPUT_MAX characters. */
  function excerptOf(output: string) {
    const head = output.split("\n").slice(0, OUTPUT_LINES).join("\n")
    return Locale.truncate(head, OUTPUT_MAX)
  }

  /** Attachment names (falling back to mime type), capped at FILE_MAX entries. */
  function fileList(attachments?: MessageV2.ToolStateCompleted["attachments"]) {
    if (!attachments?.length) return undefined
    const names = attachments.map((item) => item.filename ?? item.mime)
    if (names.length > FILE_MAX) {
      return [...names.slice(0, FILE_MAX), `+${names.length - FILE_MAX} more`]
    }
    return names
  }

  /** Output path recorded by the tool, when its metadata carries one. */
  function pathOf(metadata: MessageV2.ToolStateCompleted["metadata"]) {
    const candidate = metadata.outputPath
    if (typeof candidate === "string") return candidate
    return undefined
  }

  /**
   * Build a digest from a completed tool state.
   * Only the fields named in the Pick are read, so callers can pass a
   * narrowed state object.
   */
  export function tool(input: {
    tool: string
    state: Pick<MessageV2.ToolStateCompleted, "title" | "input" | "output" | "metadata" | "attachments">
  }): Tool {
    const { state } = input
    // JSON.stringify returns undefined for an undefined input; normalize to "{}".
    const serialized = JSON.stringify(state.input) ?? "{}"
    return {
      tool: input.tool,
      title: state.title,
      input: Locale.truncate(serialized, INPUT_MAX),
      excerpt: excerptOf(state.output),
      hash: Hash.fast(state.output).slice(0, HASH_MAX),
      bytes: Buffer.byteLength(state.output, "utf-8"),
      lines: state.output.split("\n").length,
      path: pathOf(state.metadata),
      files: fileList(state.attachments),
    }
  }

  /** Runtime guard for digests round-tripped through untyped part metadata. */
  export function isTool(input: unknown): input is Tool {
    if (!input || typeof input !== "object") return false
    const record = input as Record<string, unknown>
    // typeof checks subsume the "key in object" checks: a missing key reads
    // as undefined and fails the typeof comparison.
    const stringKeys = ["tool", "title", "input", "excerpt", "hash"] as const
    const numberKeys = ["bytes", "lines"] as const
    return (
      stringKeys.every((key) => typeof record[key] === "string") &&
      numberKeys.every((key) => typeof record[key] === "number")
    )
  }

  /** Render a digest as the text substituted for the pruned tool output. */
  export function text(input: Tool) {
    // NOTE(review): the label says sha1 but the hash comes from Hash.fast —
    // confirm that helper actually computes SHA-1 before relying on the label.
    const rendered: string[] = [
      "[Compacted tool result]",
      `tool: ${input.tool}`,
      `title: ${input.title}`,
      `input: ${input.input}`,
      `proof: sha1=${input.hash}, bytes=${input.bytes}, lines=${input.lines}`,
    ]
    if (input.path) rendered.push(`path: ${input.path}`)
    if (input.files?.length) rendered.push(`attachments: ${input.files.join(", ")}`)
    rendered.push("excerpt:", input.excerpt)
    return rendered.join("\n")
  }
}
6 changes: 5 additions & 1 deletion packages/opencode/src/session/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ export namespace LLM {
tools: Record<string, Tool>
retries?: number
toolChoice?: "auto" | "required" | "none"
opts?: Record<string, any>
}

export type StreamRequest = StreamInput & {
Expand Down Expand Up @@ -142,6 +143,7 @@ export namespace LLM {
mergeDeep(input.model.options),
mergeDeep(input.agent.options),
mergeDeep(variant),
mergeDeep(input.opts ?? {}),
)
if (isOpenaiOauth) {
options.instructions = system.join("\n")
Expand Down Expand Up @@ -255,7 +257,7 @@ export namespace LLM {
}
}

return streamText({
const result = streamText({
onError(error) {
l.error("stream error", {
error,
Expand Down Expand Up @@ -332,6 +334,8 @@ export namespace LLM {
},
},
})

return result
}

function resolveTools(input: Pick<StreamInput, "tools" | "agent" | "permission" | "user">) {
Expand Down
15 changes: 12 additions & 3 deletions packages/opencode/src/session/message-v2.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import type { SystemError } from "bun"
import type { Provider } from "@/provider/provider"
import { ModelID, ProviderID } from "@/provider/schema"
import { Effect } from "effect"
import { Evidence } from "./evidence"

/** Error shape thrown by Bun's fetch() when gzip/br decompression fails mid-stream */
interface FetchDecompressionError extends Error {
Expand Down Expand Up @@ -255,6 +256,7 @@ export namespace MessageV2 {
reason: z.string(),
snapshot: z.string().optional(),
cost: z.number(),
metadata: z.record(z.string(), z.any()).optional(),
tokens: z.object({
total: z.number().optional(),
input: z.number(),
Expand Down Expand Up @@ -715,8 +717,15 @@ export namespace MessageV2 {
if (part.type === "tool") {
toolNames.add(part.tool)
if (part.state.status === "completed") {
const outputText = part.state.time.compacted ? "[Old tool result content cleared]" : part.state.output
const attachments = part.state.time.compacted || options?.stripMedia ? [] : (part.state.attachments ?? [])
const state = part.state
const outputText = state.time.compacted
? Evidence.text(
Evidence.isTool(state.metadata.evidence)
? state.metadata.evidence
: Evidence.tool({ tool: part.tool, state }),
)
: state.output
const attachments = state.time.compacted || options?.stripMedia ? [] : (state.attachments ?? [])

// For providers that don't support media in tool results, extract media files
// (images, PDFs) to be sent as a separate user message
Expand All @@ -739,7 +748,7 @@ export namespace MessageV2 {
type: ("tool-" + part.tool) as `tool-${string}`,
state: "output-available",
toolCallId: part.callID,
input: part.state.input,
input: state.input,
output,
...(differentModel ? {} : { callProviderMetadata: part.metadata }),
})
Expand Down
1 change: 1 addition & 0 deletions packages/opencode/src/session/processor.ts
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,7 @@ export namespace SessionProcessor {
id: PartID.ascending(),
reason: value.finishReason,
snapshot: yield* snapshot.track(),
metadata: value.providerMetadata,
messageID: ctx.assistantMessage.id,
sessionID: ctx.assistantMessage.sessionID,
type: "step-finish",
Expand Down
55 changes: 48 additions & 7 deletions packages/opencode/src/session/prompt.ts
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,28 @@ const STRUCTURED_OUTPUT_SYSTEM_PROMPT = `IMPORTANT: The user has requested struc
export namespace SessionPrompt {
const log = Log.create({ service: "session.prompt" })

/**
 * Whether responses from this provider can be chained server-side via a
 * previous response id (currently only OpenAI).
 */
function threaded(model: Provider.Model) {
  const { providerID } = model
  return providerID === "openai"
}

/**
 * Decide whether this turn can chain onto the provider's stored previous
 * response instead of resending the full transcript.
 *
 * Returns the previous response id plus the messages still worth sending
 * (only the prior assistant turn), or undefined when chaining does not apply.
 */
function chain(input: { model: Provider.Model; user: MessageV2.User; assistant?: MessageV2.WithParts }) {
  const prior = input.assistant
  // Chaining needs a threaded provider and a preceding assistant turn.
  if (!threaded(input.model) || !prior || prior.info.role !== "assistant") return
  // Server-side storage must be explicitly opted in, or the id is not replayable.
  const options = input.model.options as { store?: boolean } | undefined
  if (options?.store !== true) return
  // The stored response must come from the exact same provider/model pair.
  const current = `${input.model.providerID}/${input.model.id}`
  const previous = `${prior.info.providerID}/${prior.info.modelID}`
  if (current !== previous) return
  // A user message with an id after the assistant turn presumably means fresh
  // user input rather than a tool-call continuation — skip chaining then.
  if (input.user.id > prior.info.id) return
  // Require at least one tool call that is past the pending/running states.
  const ran = prior.parts.some(
    (part) => part.type === "tool" && part.state.status !== "pending" && part.state.status !== "running",
  )
  if (!ran) return
  // The response id to chain from lives on the last step-finish part.
  const finish = prior.parts.findLast((part): part is MessageV2.StepFinishPart => part.type === "step-finish")
  const id = finish?.metadata?.openai?.responseId
  if (typeof id === "string" && id) return { id, msgs: [prior] }
  return
}

export interface Interface {
readonly assertNotBusy: (sessionID: SessionID) => Effect.Effect<void, Session.BusyError>
readonly cancel: (sessionID: SessionID) => Effect.Effect<void>
Expand Down Expand Up @@ -1343,6 +1365,23 @@ NOTE: At any point in time through this workflow you should feel free to ask the
let structured: unknown | undefined
let step = 0
const session = yield* sessions.get(sessionID)
const skills = new Map<string, string | undefined>()
const env = new Map<string, string[]>()
const systemPrompt = Effect.fnUntraced(function* (agent: Agent.Info, model: Provider.Model) {
const key = `${model.providerID}/${model.id}`
const skill = skills.has(agent.name)
? skills.get(agent.name)
: yield* Effect.promise(() => SystemPrompt.skills(agent)).pipe(
Effect.tap((value) => Effect.sync(() => skills.set(agent.name, value))),
)
const vars = env.has(key)
? env.get(key)!
: yield* Effect.promise(() => SystemPrompt.environment(model)).pipe(
Effect.tap((value) => Effect.sync(() => env.set(key, value))),
)
const instructions = yield* instruction.system().pipe(Effect.orDie)
return [...vars, ...(skill ? [skill] : []), ...instructions]
})

while (true) {
yield* status.set(sessionID, { type: "busy" })
Expand Down Expand Up @@ -1501,26 +1540,28 @@ NOTE: At any point in time through this workflow you should feel free to ask the

yield* plugin.trigger("experimental.chat.messages.transform", {}, { messages: msgs })

const [skills, env, instructions, modelMsgs] = yield* Effect.all([
Effect.promise(() => SystemPrompt.skills(agent)),
Effect.promise(() => SystemPrompt.environment(model)),
instruction.system().pipe(Effect.orDie),
Effect.promise(() => MessageV2.toModelMessages(msgs, model)),
const reuse = chain({ model, user: lastUser, assistant: lastAssistantMsg })
const src = reuse ? reuse.msgs : msgs

const [system, modelMsgs] = yield* Effect.all([
systemPrompt(agent, model),
Effect.promise(() => MessageV2.toModelMessages(src, model)),
])
const system = [...env, ...(skills ? [skills] : []), ...instructions]
const format = lastUser.format ?? { type: "text" as const }
if (format.type === "json_schema") system.push(STRUCTURED_OUTPUT_SYSTEM_PROMPT)
const send = reuse ? modelMsgs.filter((msg) => msg.role === "tool") : modelMsgs
const result = yield* handle.process({
user: lastUser,
agent,
permission: session.permission,
sessionID,
parentSessionID: session.parentID,
system,
messages: [...modelMsgs, ...(isLastStep ? [{ role: "assistant" as const, content: MAX_STEPS }] : [])],
messages: [...send, ...(isLastStep ? [{ role: "assistant" as const, content: MAX_STEPS }] : [])],
tools,
model,
toolChoice: format.type === "json_schema" ? "required" : undefined,
opts: reuse ? { previousResponseId: reuse.id, store: true } : undefined,
})

if (structured !== undefined) {
Expand Down
7 changes: 7 additions & 0 deletions packages/opencode/test/session/compaction.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -482,6 +482,13 @@ describe("session.compaction.prune", () => {
expect(part?.state.status).toBe("completed")
if (part?.type === "tool" && part.state.status === "completed") {
expect(part.state.time.compacted).toBeNumber()
expect(part.state.metadata.evidence).toMatchObject({
tool: "bash",
title: "done",
lines: 1,
})
expect(part.state.metadata.evidence).toHaveProperty("hash")
expect(part.state.metadata.evidence).toHaveProperty("excerpt")
}
},
})
Expand Down
76 changes: 47 additions & 29 deletions packages/opencode/test/session/message-v2.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -434,7 +434,7 @@ describe("session.message-v2.toModelMessage", () => {
])
})

test("replaces compacted tool output with placeholder", async () => {
test("replaces compacted tool output with an evidence digest", async () => {
const userID = "m-user"
const assistantID = "m-assistant"

Expand Down Expand Up @@ -470,35 +470,53 @@ describe("session.message-v2.toModelMessage", () => {
},
]

expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
{
role: "user",
content: [{ type: "text", text: "run tool" }],
},
{
role: "assistant",
content: [
{
type: "tool-call",
toolCallId: "call-1",
toolName: "bash",
input: { cmd: "ls" },
providerExecuted: undefined,
},
],
},
{
role: "tool",
content: [
{
type: "tool-result",
toolCallId: "call-1",
toolName: "bash",
output: { type: "text", value: "[Old tool result content cleared]" },
const result = await MessageV2.toModelMessages(input, model)

expect(result).toHaveLength(3)
expect(result[0]).toStrictEqual({
role: "user",
content: [{ type: "text", text: "run tool" }],
})
expect(result[1]).toStrictEqual({
role: "assistant",
content: [
{
type: "tool-call",
toolCallId: "call-1",
toolName: "bash",
input: { cmd: "ls" },
providerExecuted: undefined,
},
],
})
const tool = result[2] as {
role: string
content: Array<{
type: string
toolCallId: string
toolName: string
output: { type: string; value: string }
}>
}
expect(tool).toMatchObject({
role: "tool",
content: [
{
type: "tool-result",
toolCallId: "call-1",
toolName: "bash",
output: {
type: "text",
},
],
},
])
},
],
})

const text = tool.content[0]!.output.value
expect(text).toContain("tool: bash")
expect(text).toContain('input: {"cmd":"ls"}')
expect(text).toContain("excerpt:\nthis should be cleared")
expect(text).not.toContain("[Old tool result content cleared]")
})

test("converts assistant tool error into error-text tool result", async () => {
Expand Down
Loading
Loading