Skip to content

Commit 9eb6b6e

Browse files
author
Mickey Knox
committed
obey model features when sending messages
1 parent 85d10af commit 9eb6b6e

File tree

8 files changed

+41
-20
lines changed

8 files changed

+41
-20
lines changed

app/modules/serviceInterfaces/LocalServerServiceInterface/Sources/sendMessageSchema.generated.swift

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ extension Schema {
1010
public let messages: [Message]
1111
public let system: String?
1212
public let projectRoot: String?
13-
public let tools: [Tool]
13+
public let tools: [Tool]?
1414
public let model: String
1515
public let enableReasoning: Bool
1616
public let provider: APIProvider
@@ -31,7 +31,7 @@ extension Schema {
3131
messages: [Message],
3232
system: String? = nil,
3333
projectRoot: String? = nil,
34-
tools: [Tool],
34+
tools: [Tool]? = nil,
3535
model: String,
3636
enableReasoning: Bool,
3737
provider: APIProvider,
@@ -52,7 +52,7 @@ extension Schema {
5252
messages = try container.decode([Message].self, forKey: .messages)
5353
system = try container.decodeIfPresent(String?.self, forKey: .system)
5454
projectRoot = try container.decodeIfPresent(String?.self, forKey: .projectRoot)
55-
tools = try container.decode([Tool].self, forKey: .tools)
55+
tools = try container.decodeIfPresent([Tool]?.self, forKey: .tools)
5656
model = try container.decode(String.self, forKey: .model)
5757
enableReasoning = try container.decode(Bool.self, forKey: .enableReasoning)
5858
provider = try container.decode(APIProvider.self, forKey: .provider)
@@ -64,7 +64,7 @@ extension Schema {
6464
try container.encode(messages, forKey: .messages)
6565
try container.encodeIfPresent(system, forKey: .system)
6666
try container.encodeIfPresent(projectRoot, forKey: .projectRoot)
67-
try container.encode(tools, forKey: .tools)
67+
try container.encodeIfPresent(tools, forKey: .tools)
6868
try container.encode(model, forKey: .model)
6969
try container.encode(enableReasoning, forKey: .enableReasoning)
7070
try container.encode(provider, forKey: .provider)

app/modules/services/LLMService/Sources/DefaultLLMService.swift

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -267,12 +267,14 @@ final class DefaultLLMService: LLMService {
267267
messages: messageHistory,
268268
system: system,
269269
projectRoot: context?.projectRoot?.path,
270-
tools: tools
270+
tools: model.supportsTools
271+
? tools
271272
// Unless we are using an external agent, only send to the AI tools that are internal.
272273
.filter { ($0.canBeExecuted && $0.id == $0.referenceId) || provider.isExternalAgent }
273-
.map { .init(name: $0.name, description: $0.description, inputSchema: $0.inputSchema) },
274+
.map { .init(name: $0.name, description: $0.description, inputSchema: $0.inputSchema) }
275+
: nil,
274276
model: providerModel.id,
275-
enableReasoning: enableReasoning,
277+
enableReasoning: model.canReason && enableReasoning,
276278
provider: .init(
277279
provider: provider,
278280
settings: providerSettings,

app/modules/services/LLMService/Tests/LLMModelManagerTests.swift

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1094,15 +1094,15 @@ private func makeSchemaModel(
10941094
maxCompletionTokens: 8_192,
10951095
inputModalities: [.text],
10961096
outputModalities: [.text],
1097-
supportsChat: true,
1098-
supportsTools: true,
1099-
supportsReasoning: false,
1100-
supportsCompletion: true,
11011097
pricing: pricing ?? Schema.ModelPricing(
11021098
prompt: 1.0,
11031099
completion: 2.0),
11041100
createdAt: Date().timeIntervalSince1970,
1105-
rankForProgramming: 1)
1101+
rankForProgramming: 1,
1102+
supportsChat: true,
1103+
supportsTools: true,
1104+
supportsReasoning: false,
1105+
supportsCompletion: true)
11061106
}
11071107

11081108
private func makeListModelsOutput(models: [Schema.Model]) -> Schema.ListModelsOutput {

local-server/build.sha256

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
e73e92e58623897e84bfc944cf385d6089ea727d20db1a5ad64c5af486c6653b
1+
a8b504499e156b2f32b035c4cd88fb7acf4f9ac82231cfaf1576032a2a02b3bc

local-server/src/server/endpoints/sendMessage/sendMessage.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ export const registerEndpoint = (router: Router, aiProviders: AIProvider[]) => {
103103
})
104104
}
105105

106-
await sendMessageImpl({ messages, localExecutable, threadId, tools }, res)
106+
await sendMessageImpl({ messages, localExecutable, threadId, tools: tools ?? [] }, res)
107107
return
108108
}
109109

@@ -147,8 +147,8 @@ export const registerEndpoint = (router: Router, aiProviders: AIProvider[]) => {
147147
model,
148148
abortSignal: abortController.signal,
149149
tools: (addProviderOptionsToTools
150-
? addProviderOptionsToTools(tools.map(mapTool))
151-
: tools.map(mapTool)
150+
? addProviderOptionsToTools(tools?.map(mapTool) ?? [])
151+
: tools?.map(mapTool)
152152
)?.reduce(
153153
(acc, tool) => {
154154
acc[tool.name] = tool

local-server/src/server/providers/__tests__/ollama.test.ts

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ describe("OllamaAIProvider", () => {
3030
})
3131

3232
expect(result.model).toBeDefined()
33-
expect(result.generalProviderOptions).toEqual({})
33+
expect(result.generalProviderOptions).toBeUndefined()
3434
})
3535

3636
it("should create provider with custom baseURL", () => {
@@ -73,6 +73,17 @@ describe("OllamaAIProvider", () => {
7373
delete process.env.OLLAMA_LOCAL_SERVER_PROXY
7474
}
7575
})
76+
77+
it("should create provider with thinking option when reasoningBudget is set", () => {
78+
const result = provider.build({
79+
provider: {},
80+
modelName: "llama2",
81+
reasoningBudget: 100,
82+
})
83+
84+
expect(result.model).toBeDefined()
85+
expect(result.generalProviderOptions).toEqual({ ollama: { think: true } })
86+
})
7687
})
7788

7889
describe("listModels", () => {

local-server/src/server/providers/ollama.ts

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import { AIProvider, AIProviderInput, AIProviderOutput, ProviderModel, ProviderConfig, ModelModality } from "./provider"
22
import { APIProviderName } from "@/server/schemas/sendMessageSchema"
3-
import { createOllama } from "ollama-ai-provider-v2"
3+
import { createOllama, OllamaCompletionProviderOptions } from "ollama-ai-provider-v2"
44
import { LanguageModel } from "ai"
55
import { UserFacingError } from "../errors"
66
import { ProviderModelFullInfo } from "./provider"
@@ -88,6 +88,7 @@ export class OllamaAIProvider implements AIProvider {
8888
const {
8989
provider: { baseUrl },
9090
modelName,
91+
reasoningBudget,
9192
} = params
9293

9394
let finalBaseUrl = process.env["OLLAMA_LOCAL_SERVER_PROXY"] ?? baseUrl ?? "http://localhost:11434/api"
@@ -99,9 +100,16 @@ export class OllamaAIProvider implements AIProvider {
99100
baseURL: finalBaseUrl,
100101
})
101102

103+
const providerOptions: OllamaCompletionProviderOptions = {}
104+
if (reasoningBudget) {
105+
providerOptions.think = true
106+
}
107+
108+
const generalProviderOptions = Object.keys(providerOptions).length > 0 ? { ollama: providerOptions } : undefined
109+
102110
return {
103111
model: provider(modelName) as unknown as LanguageModel,
104-
generalProviderOptions: {},
112+
generalProviderOptions,
105113
}
106114
}
107115

local-server/src/server/schemas/sendMessageSchema.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ export interface SendMessageRequestParams {
22
messages: Message[]
33
system?: string
44
projectRoot: string | undefined
5-
tools: Tool[]
5+
tools?: Tool[]
66
model: string
77
enableReasoning: boolean
88
provider: APIProvider

0 commit comments

Comments (0)