@@ -231,7 +231,6 @@ public inline fun <reified T> AIAgentSubgraphBuilderBase<*, *>.nodeLLMRequestStr
     }
 }
 
-
 /**
  * A node that appends a user message to the LLM prompt, streams LLM response and transforms the stream data.
  *
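For context, the KDoc above describes a node that exposes the LLM response as a stream of frames and lets the caller transform it. A minimal sketch of that transform step (a sketch under assumptions: `StreamFrame.Append` carrying a `text` property is inferred from usage elsewhere in this PR, not confirmed):

import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.filterIsInstance
import kotlinx.coroutines.flow.map

// Keep only text deltas from the streamed frames; tool calls and end
// markers are dropped. The real node wires this through the agent graph.
fun transformStream(frames: Flow<StreamFrame>): Flow<String> =
    frames.filterIsInstance<StreamFrame.Append>().map { it.text }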

This file was deleted.

@@ -125,7 +125,7 @@ class AIAgentLLMWriteSessionTest {
             user("I have some text that needs processing.")
             assistant("I'll use the test-tool to process your text.")
             tool {
-                call("call_1", 0, "test-tool", """{"input":"sample data"}""")
+                call("call_1", "test-tool", """{"input":"sample data"}""")
                 result("call_1", "test-tool", "Processed: sample data")
             }
             assistant(
@@ -134,7 +134,7 @@ class AIAgentLLMWriteSessionTest {
             user("Can you also use the custom tool to process this data?")
             assistant("Sure, I'll use the custom tool for additional processing.")
             tool {
-                call("call_2", 0, "custom-tool", """{"input":"additional processing"}""")
+                call("call_2", "custom-tool", """{"input":"additional processing"}""")
                 result("call_2", "custom-tool", """{"output":"Custom processed: additional processing"}""")
             }
             assistant(
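The only change in this test is that call(...) drops its positional index argument. The DSL surface these call sites imply (hypothetical signatures, inferred from this file alone):

// Hypothetical builder shape implied by the updated test:
// call(id, toolName, argsJson) pairs with result(id, toolName, content).
interface ToolTurnBuilder {
    fun call(id: String, tool: String, argsJson: String)
    fun result(id: String, tool: String, content: String)
}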
@@ -11,8 +11,8 @@ import ai.koog.prompt.llm.OllamaModels
 import ai.koog.prompt.message.Message
 import ai.koog.prompt.message.ResponseMetaInfo
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.streamFrameFlowOf
 import kotlinx.coroutines.flow.Flow
-import kotlinx.coroutines.flow.flowOf
 import kotlinx.coroutines.test.runTest
 import kotlinx.datetime.Clock
 import kotlinx.datetime.Instant
@@ -70,7 +70,7 @@ class ChoiceSelectionStrategyTest {
         model: LLModel,
         tools: List<ToolDescriptor>
     ): Flow<StreamFrame> =
-        flowOf(StreamFrame.Append("Default streaming response"))
+        streamFrameFlowOf("Default streaming response")
 
     override suspend fun executeMultipleChoices(
         prompt: Prompt,
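streamFrameFlowOf is introduced by this PR as a drop-in for flowOf(StreamFrame.Append(...)) at test call sites. A plausible implementation sketch (an assumption; the real helper may differ, e.g. in whether it appends a terminal End frame):

import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.flow

// Sketch: emit each text as an Append frame, then close with an End frame.
// StreamFrame.End taking an optional finish reason is inferred from other
// call sites in this PR (emitEnd() vs. emitEnd(stopReason)).
fun streamFrameFlowOf(vararg texts: String): Flow<StreamFrame> = flow {
    texts.forEach { emit(StreamFrame.Append(it)) }
    emit(StreamFrame.End())
}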
@@ -4,10 +4,10 @@ import ai.koog.agents.core.agent.AIAgent
 import ai.koog.agents.core.agent.config.AIAgentConfig
 import ai.koog.agents.core.dsl.builder.forwardTo
 import ai.koog.agents.core.dsl.builder.strategy
-import ai.koog.agents.core.utils.filterTextOnly
 import ai.koog.agents.example.ApiKeyService
 import ai.koog.prompt.executor.llms.all.simpleOpenAIExecutor
 import ai.koog.prompt.executor.model.PromptExecutor
+import ai.koog.prompt.streaming.filterTextOnly
 import kotlinx.coroutines.runBlocking
 
 fun main(): Unit = runBlocking {
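Here and in the five files below, filterTextOnly moves from ai.koog.agents.core.utils to ai.koog.prompt.streaming, next to StreamFrame itself. A consumption sketch consistent with these examples (assuming filterTextOnly maps Flow<StreamFrame> to Flow<String>; the surrounding executor setup is elided):

import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.collect

// Hypothetical call site: keep only text frames and print them as they arrive.
suspend fun printStream(frames: Flow<StreamFrame>) {
    frames.filterTextOnly().collect { chunk -> print(chunk) }
}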
@@ -5,10 +5,10 @@ import ai.koog.agents.core.agent.config.AIAgentConfig
 import ai.koog.agents.core.dsl.builder.forwardTo
 import ai.koog.agents.core.dsl.builder.strategy
 import ai.koog.agents.core.tools.ToolRegistry
-import ai.koog.agents.core.utils.filterTextOnly
 import ai.koog.agents.example.ApiKeyService
 import ai.koog.prompt.executor.llms.all.simpleOpenAIExecutor
 import ai.koog.prompt.executor.model.PromptExecutor
+import ai.koog.prompt.streaming.filterTextOnly
 import kotlinx.coroutines.flow.collect
 import kotlinx.coroutines.runBlocking
 
@@ -3,7 +3,6 @@ package ai.koog.integration.tests.executor
 import ai.koog.agents.core.tools.ToolDescriptor
 import ai.koog.agents.core.tools.ToolParameterDescriptor
 import ai.koog.agents.core.tools.ToolParameterType
-import ai.koog.agents.core.utils.filterTextOnly
 import ai.koog.integration.tests.utils.MediaTestScenarios
 import ai.koog.integration.tests.utils.MediaTestScenarios.AudioTestScenario
 import ai.koog.integration.tests.utils.MediaTestScenarios.ImageTestScenario
@@ -45,6 +44,7 @@ import ai.koog.prompt.message.AttachmentContent
 import ai.koog.prompt.message.Message
 import ai.koog.prompt.params.LLMParams.ToolChoice
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.filterTextOnly
 import ai.koog.prompt.structure.executeStructured
 import kotlinx.coroutines.flow.toList
 import kotlinx.coroutines.runBlocking
@@ -3,7 +3,6 @@ package ai.koog.integration.tests.executor
 import ai.koog.agents.core.tools.ToolDescriptor
 import ai.koog.agents.core.tools.ToolParameterDescriptor
 import ai.koog.agents.core.tools.ToolParameterType
-import ai.koog.agents.core.utils.filterTextOnly
 import ai.koog.integration.tests.InjectOllamaTestFixture
 import ai.koog.integration.tests.OllamaTestFixture
 import ai.koog.integration.tests.OllamaTestFixtureExtension
@@ -21,6 +20,7 @@ import ai.koog.prompt.llm.LLMCapability.Tools
 import ai.koog.prompt.llm.LLMCapability.Vision
 import ai.koog.prompt.markdown.markdown
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.filterTextOnly
 import kotlinx.coroutines.flow.Flow
 import kotlinx.coroutines.flow.flow
 import kotlinx.coroutines.test.runTest
@@ -3,7 +3,6 @@ package ai.koog.integration.tests.executor
 import ai.koog.agents.core.tools.ToolDescriptor
 import ai.koog.agents.core.tools.ToolParameterDescriptor
 import ai.koog.agents.core.tools.ToolParameterType
-import ai.koog.agents.core.utils.filterTextOnly
 import ai.koog.integration.tests.utils.MediaTestScenarios
 import ai.koog.integration.tests.utils.MediaTestScenarios.AudioTestScenario
 import ai.koog.integration.tests.utils.MediaTestScenarios.ImageTestScenario
@@ -52,6 +51,7 @@ import ai.koog.prompt.message.AttachmentContent
 import ai.koog.prompt.message.Message
 import ai.koog.prompt.params.LLMParams.ToolChoice
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.filterTextOnly
 import ai.koog.prompt.structure.executeStructured
 import kotlinx.coroutines.flow.toList
 import kotlinx.coroutines.test.runTest
@@ -6,11 +6,11 @@ import ai.koog.agents.core.tools.ToolDescriptor
 import ai.koog.agents.core.tools.ToolParameterDescriptor
 import ai.koog.agents.core.tools.ToolParameterType
 import ai.koog.agents.core.tools.annotations.LLMDescription
-import ai.koog.agents.core.utils.filterTextOnly
 import ai.koog.prompt.dsl.Prompt
 import ai.koog.prompt.llm.LLMProvider
 import ai.koog.prompt.llm.LLModel
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.filterTextOnly
 import ai.koog.prompt.structure.RegisteredStandardJsonSchemaGenerators
 import ai.koog.prompt.structure.StructureFixingParser
 import ai.koog.prompt.structure.StructuredOutput
@@ -44,7 +44,7 @@ public class CachedPromptExecutor(
         getOrPut(prompt, tools, model).forEach {
             emit(it.toStreamFrame())
         }
-   }
+    }
 
     private suspend fun getOrPut(prompt: Prompt, model: LLModel): Message.Assistant {
         return cache.get(prompt, emptyList(), clock)
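Only indentation changes here, but the surrounding code shows how the cached executor streams: it replays the cached response by mapping each message to a frame. A sketch of what toStreamFrame() plausibly does (an assumption; the real conversion is not shown in this PR):

// Assumed mapping: a cached assistant message replays as one Append frame
// (tool-call messages would map to StreamFrame.ToolCall analogously).
fun Message.Assistant.toStreamFrame(): StreamFrame = StreamFrame.Append(content)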
@@ -12,8 +12,8 @@ import ai.koog.prompt.message.Message
 import ai.koog.prompt.message.RequestMetaInfo
 import ai.koog.prompt.message.ResponseMetaInfo
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.streamFrameFlowOf
 import kotlinx.coroutines.flow.Flow
-import kotlinx.coroutines.flow.flowOf
 import kotlinx.coroutines.test.runTest
 import kotlinx.datetime.Clock
 import kotlin.test.Test
@@ -73,7 +73,7 @@ class CachedPromptExecutorTest {
         tools: List<ToolDescriptor>
     ): Flow<StreamFrame> {
         executeStreamingCalled = true
-        return flowOf(StreamFrame.Append("Streaming response from executor"))
+        return streamFrameFlowOf("Streaming response from executor")
     }
 
     override suspend fun moderate(
@@ -15,6 +15,10 @@ import ai.koog.prompt.message.Message
 import ai.koog.prompt.message.ResponseMetaInfo
 import ai.koog.prompt.params.LLMParams
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.emitAppend
+import ai.koog.prompt.streaming.emitEnd
+import ai.koog.prompt.streaming.emitToolCall
+import ai.koog.prompt.streaming.streamFrameFlow
 import io.github.oshai.kotlinlogging.KotlinLogging
 import io.ktor.client.HttpClient
 import io.ktor.client.call.body
@@ -38,7 +42,6 @@ import io.ktor.http.isSuccess
 import io.ktor.serialization.kotlinx.json.json
 import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.flow.Flow
-import kotlinx.coroutines.flow.flow
 import kotlinx.coroutines.withContext
 import kotlinx.datetime.Clock
 import kotlinx.serialization.json.Json
@@ -149,7 +152,7 @@ public open class AnthropicLLMClient(
         prompt: Prompt,
         model: LLModel,
         tools: List<ToolDescriptor>
-    ): Flow<StreamFrame> = flow {
+    ): Flow<StreamFrame> = streamFrameFlow {
         logger.debug { "Executing streaming prompt: $prompt with model: $model without tools" }
         require(model.capabilities.contains(LLMCapability.Completion)) {
             "Model ${model.id} does not support chat completions"
@@ -175,14 +178,15 @@ public open class AnthropicLLMClient(
                     .takeIf { it.event == "content_block_delta" }
                     ?.data?.trim()?.let { json.decodeFromString<AnthropicStreamResponse>(it) }
                     ?.let { response ->
-                        response.delta?.text?.let { emit(StreamFrame.Append(it)) }
-                        response.message?.stopReason?.let { emit(StreamFrame.End(it)) }
-                        response.delta?.toolUse?.let { emit(StreamFrame.ToolCall(
+                        response.delta?.text?.let { emitAppend(it) }
+                        response.delta?.toolUse?.let {
+                            emitToolCall(
                                 id = it.id,
                                 name = it.name,
-                                content = it.input.toString())
+                                content = it.input.toString()
                             )
                         }
+                        response.message?.stopReason?.let { emitEnd(it) }
                     }
             }
         }
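streamFrameFlow and the emit helpers used above are new in this PR. From the call sites here and in the DeepSeek and Google clients, plausible shapes (sketches under assumption, not the actual implementations):

import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.FlowCollector
import kotlinx.coroutines.flow.flow

// Assumed builder: an ordinary flow builder specialized to StreamFrame.
fun streamFrameFlow(block: suspend FlowCollector<StreamFrame>.() -> Unit): Flow<StreamFrame> =
    flow(block)

// Assumed helpers wrapping frame construction; the nullable id and optional
// finish reason are inferred from the three client call sites in this PR.
suspend fun FlowCollector<StreamFrame>.emitAppend(text: String) =
    emit(StreamFrame.Append(text))

suspend fun FlowCollector<StreamFrame>.emitToolCall(id: String?, name: String, content: String) =
    emit(StreamFrame.ToolCall(id = id, name = name, content = content))

suspend fun FlowCollector<StreamFrame>.emitEnd(finishReason: String? = null) =
    emit(StreamFrame.End(finishReason))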
@@ -195,6 +195,6 @@ internal object BedrockAI21JambaSerialization {
                     )
                 }?.let(::addAll)
             }
-        }?:emptyList()
+        } ?: emptyList()
     }
 }
@@ -156,6 +156,6 @@ internal object BedrockAmazonNovaSerialization {
             buildList {
                 it.text?.let(StreamFrame::Append)?.let(::add)
             }
-        }?:emptyList()
+        } ?: emptyList()
     }
 }
@@ -89,6 +89,7 @@ internal object BedrockAnthropicClaudeSerialization {
                     }
                     contentParts.add(AnthropicContent.Image(source = imageSource))
                 }
+
                 else -> throw IllegalArgumentException(
                     "Unsupported attachment type: ${attachment::class.simpleName}"
                 )
@@ -238,14 +239,15 @@ internal object BedrockAnthropicClaudeSerialization {
                             )
                         }?.let(::add)
                     }
-                }?:emptyList()
+                } ?: emptyList()
             }
 
             "message_delta" -> {
                 streamResponse.message?.content?.map { content ->
                     when (content) {
                         is AnthropicResponseContent.Text ->
                             StreamFrame.Append(content.text)
+
                         is AnthropicResponseContent.ToolUse ->
                             StreamFrame.ToolCall(
                                 id = content.id,
@@ -268,7 +270,7 @@ internal object BedrockAnthropicClaudeSerialization {
                     emptyList()
                 }
 
-                else -> emptyList()
+            else -> emptyList()
         }
     }
 }
@@ -15,8 +15,13 @@ import ai.koog.prompt.executor.model.LLMChoice
 import ai.koog.prompt.llm.LLModel
 import ai.koog.prompt.params.LLMParams
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.emitAppend
+import ai.koog.prompt.streaming.emitEnd
+import ai.koog.prompt.streaming.emitToolCall
+import ai.koog.prompt.streaming.streamFrameFlow
 import io.github.oshai.kotlinlogging.KotlinLogging
 import io.ktor.client.HttpClient
+import kotlinx.coroutines.flow.Flow
 import kotlinx.datetime.Clock
 
 /**
@@ -98,19 +103,20 @@ public class DeepSeekLLMClient(
     override fun decodeResponse(data: String): DeepSeekChatCompletionResponse =
         json.decodeFromString(data)
 
-    override fun processStreamingChunk(chunk: DeepSeekChatCompletionStreamResponse): List<StreamFrame> =
-        chunk.choices.firstOrNull()?.delta?.let {
-            buildList {
-                it.content?.let(StreamFrame::Append)?.let(::add)
-                it.toolCalls?.map { toolCall ->
-                    StreamFrame.ToolCall(
+    override fun processStreamingChunk(chunk: DeepSeekChatCompletionStreamResponse): Flow<StreamFrame> =
+        streamFrameFlow {
+            chunk.choices.firstOrNull()?.let { choice ->
+                choice.delta.content?.let { emitAppend(it) }
+                choice.delta.toolCalls?.forEach { toolCall ->
+                    emitToolCall(
                         id = toolCall.id,
-                        name = toolCall.function?.name,
-                        content = toolCall.function?.arguments
+                        name = toolCall.function?.name ?: return@forEach,
+                        content = toolCall.function?.arguments ?: "{}"
                     )
-                }?.let(::addAll)
-            }
-        }?:emptyList()
+                }
+                choice.finishReason?.let { emitEnd(it) }
+            }
+        }
 
     public override suspend fun moderate(prompt: Prompt, model: LLModel): ModerationResult {
         logger.warn { "Moderation is not supported by DeepSeek API" }
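The signature change above is the interesting part: processStreamingChunk now returns Flow<StreamFrame> instead of List<StreamFrame>, so one SSE chunk can emit several frames lazily. A hypothetical call-site sketch (streamAll and its parameters are illustrative, not from this PR):

import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.emitAll
import kotlinx.coroutines.flow.transform

// Flatten per-chunk frame flows into one ordered stream of frames.
fun streamAll(
    chunks: Flow<DeepSeekChatCompletionStreamResponse>,
    client: DeepSeekLLMClient,
): Flow<StreamFrame> =
    chunks.transform { chunk -> emitAll(client.processStreamingChunk(chunk)) }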
@@ -21,6 +21,10 @@ import ai.koog.prompt.message.Message
 import ai.koog.prompt.message.ResponseMetaInfo
 import ai.koog.prompt.params.LLMParams
 import ai.koog.prompt.streaming.StreamFrame
+import ai.koog.prompt.streaming.emitAppend
+import ai.koog.prompt.streaming.emitEnd
+import ai.koog.prompt.streaming.emitToolCall
+import ai.koog.prompt.streaming.streamFrameFlow
 import ai.koog.prompt.structure.RegisteredBasicJsonSchemaGenerators
 import ai.koog.prompt.structure.RegisteredStandardJsonSchemaGenerators
 import ai.koog.prompt.structure.annotations.InternalStructuredOutputApi
@@ -47,7 +51,7 @@ import io.ktor.http.isSuccess
 import io.ktor.serialization.kotlinx.json.json
 import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.flow.Flow
-import kotlinx.coroutines.flow.flow
+import kotlinx.coroutines.flow.onCompletion
 import kotlinx.coroutines.withContext
 import kotlinx.datetime.Clock
 import kotlinx.serialization.json.Json
@@ -146,7 +150,7 @@ public open class GoogleLLMClient(
         prompt: Prompt,
         model: LLModel,
         tools: List<ToolDescriptor>
-    ): Flow<StreamFrame> = flow {
+    ): Flow<StreamFrame> = streamFrameFlow {
         logger.debug { "Executing streaming prompt: $prompt with model: $model" }
         require(model.capabilities.contains(LLMCapability.Completion)) {
             "Model ${model.id} does not support chat completions"
@@ -168,19 +172,25 @@ public open class GoogleLLMClient(
                 setBody(request)
             }
         ) {
-            incoming.collect { event ->
+            incoming.onCompletion {
+                if (it == null)
+                    emitEnd() // TODO: finishReason?
+                else
+                    throw it
+            }.collect { event ->
                 event
                     .takeIf { it.data != "[DONE]" }
                     ?.data?.trim()?.let { json.decodeFromString<GoogleResponse>(it) }
                     ?.candidates?.firstOrNull()?.content
                     ?.parts?.forEach { part ->
-                        when(part) {
-                            is GooglePart.FunctionCall -> emit(StreamFrame.ToolCall(
+                        when (part) {
+                            is GooglePart.FunctionCall -> emitToolCall(
                                 id = part.functionCall.id,
                                 name = part.functionCall.name,
-                                content = part.functionCall.args?.toString()?:"{}")
-                            is GooglePart.Text -> emit(StreamFrame.Append(part.text))
+                                content = part.functionCall.args?.toString() ?: "{}"
+                            )
+
+                            is GooglePart.Text -> emitAppend(part.text)
                             else -> Unit
                         }
                     }
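The onCompletion hook above is the behavioral change: the stream now always closes with a terminal End frame on normal completion, while failures rethrow instead of being masked. The same pattern in isolation (a sketch; withTerminalEnd is an illustrative name, not from this PR):

import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.onCompletion

// Append an End frame only when the upstream flow completes normally;
// propagate upstream failures unchanged.
fun Flow<StreamFrame>.withTerminalEnd(): Flow<StreamFrame> =
    onCompletion { cause ->
        if (cause == null) emit(StreamFrame.End()) else throw cause
    }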