46 changes: 24 additions & 22 deletions packages/opencode/bin/opencode
@@ -1,12 +1,15 @@
 #!/usr/bin/env node
 
-const childProcess = require("child_process")
-const fs = require("fs")
-const path = require("path")
-const os = require("os")
+import { spawnSync } from "child_process"
+import { realpathSync, existsSync, readdirSync } from "fs"
+import { dirname, join } from "path"
+import { platform, arch } from "os"
+import { fileURLToPath } from "url"
 
+const __filename = fileURLToPath(import.meta.url)
+
 function run(target) {
-  const result = childProcess.spawnSync(target, process.argv.slice(2), {
+  const result = spawnSync(target, process.argv.slice(2), {
     stdio: "inherit",
   })
   if (result.error) {
@@ -21,9 +24,8 @@ const envPath = process.env.OPENCODE_BIN_PATH
 if (envPath) {
   run(envPath)
 }
-
-const scriptPath = fs.realpathSync(__filename)
-const scriptDir = path.dirname(scriptPath)
+const scriptPath = realpathSync(__filename)
+const scriptDir = dirname(scriptPath)
 
 const platformMap = {
   darwin: "darwin",
@@ -36,34 +38,34 @@ const archMap = {
   arm: "arm",
 }
 
-let platform = platformMap[os.platform()]
-if (!platform) {
-  platform = os.platform()
+let platformName = platformMap[platform()]
+if (!platformName) {
+  platformName = platform()
 }
-let arch = archMap[os.arch()]
-if (!arch) {
-  arch = os.arch()
+let archName = archMap[arch()]
+if (!archName) {
+  archName = arch()
 }
-const base = "opencode-" + platform + "-" + arch
-const binary = platform === "windows" ? "opencode.exe" : "opencode"
+const base = "opencode-" + platformName + "-" + archName
+const binary = platformName === "windows" ? "opencode.exe" : "opencode"
 
 function findBinary(startDir) {
   let current = startDir
   for (;;) {
-    const modules = path.join(current, "node_modules")
-    if (fs.existsSync(modules)) {
-      const entries = fs.readdirSync(modules)
+    const modules = join(current, "node_modules")
+    if (existsSync(modules)) {
+      const entries = readdirSync(modules)
       for (const entry of entries) {
         if (!entry.startsWith(base)) {
           continue
         }
-        const candidate = path.join(modules, entry, "bin", binary)
-        if (fs.existsSync(candidate)) {
+        const candidate = join(modules, entry, "bin", binary)
+        if (existsSync(candidate)) {
           return candidate
         }
       }
     }
-    const parent = path.dirname(current)
+    const parent = dirname(current)
     if (parent === current) {
       return
     }
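Taken together, the launcher now resolves the platform-specific binary purely through named ESM imports. As a rough sketch of the lookup it performs (the platform/arch values and package layout below are illustrative assumptions, not output from a real install):

```ts
// On an Apple Silicon Mac, platform() returns "darwin" and arch() returns "arm64",
// so the launcher searches each enclosing node_modules for a package whose name
// starts with the base below and runs the binary inside its bin/ directory.
const platformName = "darwin" // platformMap[platform()], falling back to platform()
const archName = "arm64" // archMap[arch()], falling back to arch()
const base = `opencode-${platformName}-${archName}`
const binary = platformName === "windows" ? "opencode.exe" : "opencode"
console.log(`node_modules/${base}/bin/${binary}`)
// -> "node_modules/opencode-darwin-arm64/bin/opencode"
```

The next file in the diff is new: the openai-compatible-middleware module imported further below, which does the actual LM Studio response filtering.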
@@ -0,0 +1,187 @@
+import type { LanguageModelV2 } from "@ai-sdk/provider"
+import { wrapLanguageModel } from "ai"
+
+/**
+ * Wraps a fetch function to filter out empty tool_calls arrays from API responses.
+ *
+ * LM Studio (and some other OpenAI-compatible providers) always include
+ * `tool_calls: []` in responses, even when no tools are called. This causes
+ * the AI SDK to wait indefinitely for tool execution. This function intercepts
+ * the fetch responses and removes empty tool_calls arrays when finish_reason is "stop".
+ */
+export function createFilteredFetch(originalFetch: typeof fetch): typeof fetch {
+  const filteredFetch = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
+    const response = await originalFetch(input, init)
+
+    // Only process JSON responses from chat completions endpoints
+    const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url
+    if (!url.includes("/chat/completions")) {
+      return response
+    }
+
+    const contentType = response.headers.get("content-type")
+
+    // For streaming responses (text/event-stream), process the SSE stream
+    if (contentType && contentType.includes("text/event-stream")) {
+      const originalStream = response.body
+      if (!originalStream) {
+        return response
+      }
+
+      const stream = new ReadableStream({
+        async start(controller) {
+          const reader = originalStream.getReader()
+          const decoder = new TextDecoder()
+          let buffer = ""
+
+          try {
+            while (true) {
+              const { done, value } = await reader.read()
+              if (done) break
+
+              buffer += decoder.decode(value, { stream: true })
+              const chunks = buffer.split("\n\n")
+              buffer = chunks.pop() || ""
+
+              for (const chunk of chunks) {
+                if (chunk.startsWith("data: ")) {
+                  const data = chunk.slice(6).trim()
+                  if (data === "[DONE]") {
+                    controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
+                    continue
+                  }
+
+                  try {
+                    const json = JSON.parse(data)
+                    let modified = false
+
+                    // Process choices in the stream chunk
+                    if (json.choices && Array.isArray(json.choices)) {
+                      const modifiedChoices = json.choices.map((choice: any) => {
+                        // Only filter empty tool_calls from the final message when finish_reason is "stop"
+                        // Don't filter from deltas during streaming as they might be part of valid tool call streams
+                        if (
+                          choice.message &&
+                          Array.isArray(choice.message.tool_calls) &&
+                          choice.message.tool_calls.length === 0 &&
+                          choice.finish_reason === "stop"
+                        ) {
+                          modified = true
+                          const { tool_calls, ...rest } = choice.message
+                          return {
+                            ...choice,
+                            message: rest,
+                          }
+                        }
+
+                        return choice
+                      })
+
+                      if (modified) {
+                        json.choices = modifiedChoices
+                        controller.enqueue(
+                          new TextEncoder().encode(`data: ${JSON.stringify(json)}\n\n`)
+                        )
+                        continue
+                      }
+                    }
+
+                    // No modification needed, pass through
+                    controller.enqueue(new TextEncoder().encode(`${chunk}\n\n`))
+                  } catch {
+                    // Not JSON or parse error, pass through
+                    controller.enqueue(new TextEncoder().encode(`${chunk}\n\n`))
+                  }
+                } else if (chunk.trim()) {
+                  // Not a data line but has content, pass through
+                  controller.enqueue(new TextEncoder().encode(`${chunk}\n\n`))
+                }
+              }
+            }
+
+            // Flush remaining buffer
+            if (buffer.trim()) {
+              controller.enqueue(new TextEncoder().encode(buffer))
+            }
+
+            controller.close()
+          } catch (error) {
+            controller.error(error)
+          }
+        },
+      })
+
+      return new Response(stream, {
+        status: response.status,
+        statusText: response.statusText,
+        headers: response.headers,
+      })
+    }
+
+    // For non-streaming JSON responses
+    if (contentType && contentType.includes("application/json")) {
+      // Clone the response so we can read the body
+      const clonedResponse = response.clone()
+
+      try {
+        const text = await clonedResponse.text()
+        let json: any
+
+        try {
+          json = JSON.parse(text)
+        } catch {
+          // Not JSON, return original response
+          return response
+        }
+
+        // Process non-streaming responses
+        if (json.choices && Array.isArray(json.choices)) {
+          let modified = false
+          const modifiedChoices = json.choices.map((choice: any) => {
+            if (
+              choice.message &&
+              Array.isArray(choice.message.tool_calls) &&
+              choice.message.tool_calls.length === 0 &&
+              choice.finish_reason === "stop"
+            ) {
+              modified = true
+              const { tool_calls, ...rest } = choice.message
+              return {
+                ...choice,
+                message: rest,
+              }
+            }
+            return choice
+          })
+
+          if (modified) {
+            return new Response(JSON.stringify({ ...json, choices: modifiedChoices }), {
+              status: response.status,
+              statusText: response.statusText,
+              headers: response.headers,
+            })
+          }
+        }
+      } catch (error) {
+        // If anything goes wrong, return the original response
+        return response
+      }
+    }
+
+    return response
+  }
+  return filteredFetch as typeof fetch
+}
+
+/**
+ * Wraps a language model to filter out empty tool_calls from responses.
+ * Note: This is a placeholder - the actual filtering happens at the fetch level
+ * via createFilteredFetch. This function just returns the model as-is since
+ * wrapLanguageModel middleware only supports transformParams, not transformResult.
+ */
+export function filterEmptyToolCalls<T extends LanguageModelV2>(model: T): T {
+  // The filtering is handled at the fetch level in createFilteredFetch
+  // No need to wrap with middleware since transformResult is not supported
+  return model
+}

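To make the middleware's behavior concrete, here is a minimal sketch that runs createFilteredFetch against a stubbed non-streaming response; the endpoint URL and payload are illustrative, not captured from a real LM Studio server:

```ts
import { createFilteredFetch } from "./openai-compatible-middleware"

// Stub fetch returning the LM Studio quirk: an empty tool_calls array
// alongside finish_reason "stop" on a /chat/completions response.
const stubFetch: typeof fetch = async () =>
  new Response(
    JSON.stringify({
      choices: [
        {
          message: { role: "assistant", content: "Hello!", tool_calls: [] },
          finish_reason: "stop",
        },
      ],
    }),
    { headers: { "content-type": "application/json" } },
  )

const filtered = createFilteredFetch(stubFetch)
const res = await filtered("http://localhost:1234/v1/chat/completions")
const body = await res.json()
// The empty tool_calls key is gone, so the AI SDK treats the turn as plain
// text instead of waiting for tool execution that will never happen.
console.log("tool_calls" in body.choices[0].message) // false
```

The streaming branch applies the same predicate per SSE chunk, and by design only strips tool_calls from a final message with finish_reason "stop", so deltas that stream real tool calls pass through unchanged. The next hunk wires this into the provider factory.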
@@ -2,6 +2,7 @@ import type { LanguageModelV2 } from "@ai-sdk/provider"
 import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible"
 import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
 import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"
+import { createFilteredFetch, filterEmptyToolCalls } from "./openai-compatible-middleware"
 
 // Import the version or define it
 const VERSION = "0.1.0"
@@ -66,12 +67,27 @@ export function createOpenaiCompatible(options: OpenaiCompatibleProviderSettings
   const getHeaders = () => withUserAgentSuffix(headers, `ai-sdk/openai-compatible/${VERSION}`)
 
   const createChatModel = (modelId: OpenaiCompatibleModelId) => {
-    return new OpenAICompatibleChatLanguageModel(modelId, {
+    const originalFetch = options.fetch ?? fetch
+
+    // Only apply empty tool_calls filtering for LM Studio
+    // Detect LM Studio by checking baseURL (localhost) or provider name
+    const isLMStudio =
+      baseURL.includes("localhost") ||
+      baseURL.includes("127.0.0.1") ||
+      options.name?.toLowerCase().includes("lm-studio") ||
+      options.name?.toLowerCase().includes("lmstudio")
+
+    const fetchToUse = isLMStudio ? createFilteredFetch(originalFetch) : originalFetch
+
+    const baseModel = new OpenAICompatibleChatLanguageModel(modelId, {
       provider: `${options.name ?? "openai-compatible"}.chat`,
       headers: getHeaders,
       url: ({ path }) => `${baseURL}${path}`,
-      fetch: options.fetch,
+      fetch: fetchToUse,
     })
+
+    // Only wrap with middleware for LM Studio
+    return isLMStudio ? filterEmptyToolCalls(baseModel) : baseModel
   }
 
   const createResponsesModel = (modelId: OpenaiCompatibleModelId) => {
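The detection is a heuristic rather than an explicit flag. Restated standalone (the URLs below are examples, and isLMStudioLike is a hypothetical name for illustration):

```ts
// Mirrors the isLMStudio check in createChatModel above.
const isLMStudioLike = (baseURL: string, name?: string): boolean =>
  baseURL.includes("localhost") ||
  baseURL.includes("127.0.0.1") ||
  !!name?.toLowerCase().includes("lm-studio") ||
  !!name?.toLowerCase().includes("lmstudio")

isLMStudioLike("http://localhost:1234/v1") // true  -> filtered fetch + wrapper
isLMStudioLike("http://127.0.0.1:8080/v1") // true  -> filtered fetch + wrapper
isLMStudioLike("https://api.example.com/v1", "my-provider") // false -> original fetch
```

Any localhost baseURL (an Ollama endpoint, for instance) also satisfies the check, but since createFilteredFetch only rewrites responses that carry an empty tool_calls array with finish_reason "stop", the wrapper behaves as a pass-through for providers that do not exhibit the quirk.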
37 changes: 36 additions & 1 deletion packages/opencode/src/session/processor.ts
@@ -248,10 +248,45 @@ export namespace SessionProcessor {
         input.assistantMessage.finish = value.finishReason
         input.assistantMessage.cost += usage.cost
         input.assistantMessage.tokens = usage.tokens
+
+        // Fallback: If finish_reason is "stop" and we have pending tool calls
+        // that were never invoked, clean them up. This handles cases where
+        // providers (like LM Studio) send empty tool_calls arrays that cause
+        // the AI SDK to create pending tool-call events that never complete.
+        if (value.finishReason === "stop") {
+          const parts = await MessageV2.parts(input.assistantMessage.id)
+          for (const part of parts) {
+            if (
+              part.type === "tool" &&
+              part.state.status === "pending" &&
+              (!part.state.input || Object.keys(part.state.input).length === 0)
+            ) {
+              // This is a pending tool call that was never actually invoked
+              // (empty input means it was created from an empty tool_calls array)
+              // Remove it since finish_reason is "stop" and no tools were called
+              const startTime = Date.now()
+              await Session.updatePart({
+                ...part,
+                state: {
+                  status: "error",
+                  input: part.state.input,
+                  error: "Empty tool_calls array filtered",
+                  time: {
+                    start: startTime,
+                    end: Date.now(),
+                  },
+                },
+              })
+              delete toolcalls[part.callID]
+            }
+          }
+        }
+
+        const finishSnapshot = await Snapshot.track()
         await Session.updatePart({
           id: Identifier.ascending("part"),
           reason: value.finishReason,
-          snapshot: await Snapshot.track(),
+          snapshot: finishSnapshot,
           messageID: input.assistantMessage.id,
           sessionID: input.assistantMessage.sessionID,
           type: "step-finish",
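The predicate that decides which pending tool parts to clean up can be read in isolation. A minimal sketch, with the part shape reduced to just the fields the check reads (ToolPartLike is a hypothetical type for illustration):

```ts
type ToolPartLike = {
  type: string
  callID: string
  state: { status: string; input?: Record<string, unknown> }
}

// Returns the pending tool parts that were never actually invoked: an empty
// input object is the signature of a part created from an empty tool_calls
// array, and finish_reason "stop" confirms no tool execution is coming.
function strandedToolCalls(parts: ToolPartLike[], finishReason: string): ToolPartLike[] {
  if (finishReason !== "stop") return []
  return parts.filter(
    (part) =>
      part.type === "tool" &&
      part.state.status === "pending" &&
      (!part.state.input || Object.keys(part.state.input).length === 0),
  )
}

// Example: "a" was created from an empty tool_calls array and gets flagged;
// "b" has real input, so it survives.
strandedToolCalls(
  [
    { type: "tool", callID: "a", state: { status: "pending", input: {} } },
    { type: "tool", callID: "b", state: { status: "pending", input: { path: "x" } } },
  ],
  "stop",
) // -> only the "a" part
```

Each flagged part is then marked as an error ("Empty tool_calls array filtered") and removed from the in-flight toolcalls map, so the step-finish part that follows records a clean stop.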