Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@ import ai.koog.prompt.dsl.prompt
import ai.koog.prompt.executor.clients.anthropic.AnthropicModels
import ai.koog.prompt.executor.clients.google.GoogleModels
import ai.koog.prompt.executor.clients.openai.OpenAIModels
import ai.koog.prompt.executor.llms.SingleLLMPromptExecutor
import ai.koog.prompt.executor.llms.all.simpleAnthropicExecutor
import ai.koog.prompt.executor.llms.all.simpleGoogleAIExecutor
import ai.koog.prompt.executor.llms.all.simpleOpenAIExecutor
import ai.koog.prompt.executor.ollama.client.OllamaClient
import ai.koog.prompt.llm.OllamaModels
import ai.koog.prompt.markdown.markdown
import kotlinx.coroutines.runBlocking
import kotlinx.io.files.Path

suspend fun main() {
Expand Down Expand Up @@ -40,18 +42,24 @@ suspend fun main() {
}
}

val ollamaModel = OllamaModels.Alibaba.QWEN3_VL_8B
val llmClient = OllamaClient().also { it.getModelOrNull(ollamaModel.id, pullIfMissing = true) }
val ollamaExecutor = SingleLLMPromptExecutor(llmClient)
val openaiExecutor = simpleOpenAIExecutor(ApiKeyService.openAIApiKey)
val anthropicExecutor = simpleAnthropicExecutor(ApiKeyService.anthropicApiKey)
val googleExecutor = simpleGoogleAIExecutor(ApiKeyService.googleApiKey)

try {
println("OllamaAI response:")
ollamaExecutor.execute(prompt, ollamaModel).single().content.also(::println)
println("OpenAI response:")
openaiExecutor.execute(prompt, OpenAIModels.Chat.GPT4_1).single().content.also(::println)
println("Anthropic response:")
anthropicExecutor.execute(prompt, AnthropicModels.Sonnet_4).single().content.also(::println)
println("Google response:")
googleExecutor.execute(prompt, GoogleModels.Gemini2_0Flash).single().content.also(::println)
} finally {
ollamaExecutor.close()
openaiExecutor.close()
anthropicExecutor.close()
googleExecutor.close()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,32 @@ public object OllamaModels {
),
contextLength = 32_768,
)

/**
 * Represents the Alibaba Qwen3-VL vision-language model with 8 billion parameters.
 *
 * This predefined instance of `LLModel` is provided under the Ollama provider and
 * declares the following capabilities:
 * - `Schema.JSON.Basic`: supports basic JSON-schema-constrained output.
 * - `Temperature`: allows adjusting the temperature setting to control randomness in responses.
 * - `Tools`: enables tool/function calling.
 * - `Vision.Image`: enables processing of image inputs alongside text.
 *
 * The model is identified by the ID "qwen3-vl:8b" and advertises a context length
 * of 256,000 tokens.
 *
 * @see <a href="https://ollama.com/library/qwen3-vl">qwen3-vl on the Ollama library</a>
 */
public val QWEN3_VL_8B: LLModel = LLModel(
provider = LLMProvider.Ollama,
id = "qwen3-vl:8b",
capabilities =
listOf(
LLMCapability.Schema.JSON.Basic,
LLMCapability.Temperature,
LLMCapability.Tools,
LLMCapability.Vision.Image,
),
contextLength = 256_000,
)
}

/**
Expand Down Expand Up @@ -327,4 +353,33 @@ public object OllamaModels {
contextLength = 16_384,
)
}

/**
 * The `OpenAI` object groups predefined specifications for OpenAI-published models
 * that are served through the Ollama provider, including their identifiers and
 * supported capabilities.
 */
public object OpenAI {
/**
 * Represents the OpenAI gpt-oss model with 20 billion parameters, served via Ollama.
 *
 * Declared capabilities: completion, standard JSON-schema output, speculation,
 * temperature control, tool choice, and tool/function calling. The model is
 * identified by the ID "gpt-oss:20b" and advertises a context length of
 * 128,000 tokens.
 *
 * @see <a href="https://ollama.com/library/gpt-oss">gpt-oss on the Ollama library</a>
 */
public val GPT_OSS_20B: LLModel = LLModel(
provider = LLMProvider.Ollama,
id = "gpt-oss:20b",
capabilities =
listOf(
LLMCapability.Completion,
LLMCapability.Schema.JSON.Standard,
LLMCapability.Speculation,
LLMCapability.Temperature,
LLMCapability.ToolChoice,
LLMCapability.Tools,
),
contextLength = 128_000,
)
}
}