diff --git a/.changeset/tough-planes-lie.md b/.changeset/tough-planes-lie.md new file mode 100644 index 0000000..a845151 --- /dev/null +++ b/.changeset/tough-planes-lie.md @@ -0,0 +1,2 @@ +--- +--- diff --git a/README.md b/README.md index 23780d9..0ccd67b 100644 --- a/README.md +++ b/README.md @@ -10,10 +10,10 @@ ## Features -- :zap: **Functions all the way down** — `agent`, `tool`, `workflow` are functions returning plain objects. +- :zap: **Functions all the way down** — `agent`, `tool`, `flowAgent` are functions returning plain objects. - :jigsaw: **Composition over configuration** — Combine small pieces instead of configuring large ones. - :shield: **Result, never throw** — Every public method returns `Result`. -- :lock: **Closures are state** — Workflow state is just variables in your handler. +- :lock: **Closures are state** — Flow agent state is just variables in your handler. - :triangular_ruler: **Type-driven design** — Zod schemas, discriminated unions, exhaustive matching. ## Install @@ -28,16 +28,16 @@ npm install @funkai/agents @funkai/prompts ```ts import { agent } from "@funkai/agents"; -import { prompts } from "~prompts"; +import { openai } from "@ai-sdk/openai"; const writer = agent({ name: "writer", - model: "openai/gpt-4.1", - system: prompts("writer"), + model: openai("gpt-4.1"), + system: "You are a helpful writer.", tools: { search }, }); -const result = await writer.generate("Write about closures"); +const result = await writer.generate({ prompt: "Write about closures" }); ``` ### Define a prompt @@ -54,14 +54,15 @@ You are a {{ tone }} writer. 
### Generate typed prompts ```bash -npx funkai prompts generate --out .prompts/client --roots src/agents +npx funkai prompts generate --out .prompts/client --includes "src/agents/**" ``` ## Packages | Package | Description | | ------------------------------------- | -------------------------------------------------------------------- | -| [`@funkai/agents`](packages/agents) | Lightweight agent, tool, and workflow orchestration | +| [`@funkai/agents`](packages/agents) | Lightweight agent, tool, and flow agent orchestration | +| [`@funkai/models`](packages/models) | Model catalog, provider resolution, and cost calculations | | [`@funkai/prompts`](packages/prompts) | Prompt SDK with LiquidJS templating, Zod validation, and CLI codegen | | [`@funkai/cli`](packages/cli) | CLI for the funkai prompt SDK | diff --git a/contributing/concepts/architecture.md b/contributing/concepts/architecture.md index a0cb655..abdd544 100644 --- a/contributing/concepts/architecture.md +++ b/contributing/concepts/architecture.md @@ -98,7 +98,7 @@ The models package provides the model catalog, provider resolution, and cost cal | Module | Purpose | | -------- | -------------------------------------------------------------------- | | Catalog | Model definitions with pricing data, lookup by ID, filtered queries | -| Provider | OpenRouter integration, `createModelResolver()` for multi-provider | +| Provider | Provider registry, `createProviderRegistry()` for multi-provider | | Cost | `calculateCost()` to compute dollar costs from token usage + pricing | ### Generated Data @@ -142,7 +142,7 @@ The prompts package provides a prompt authoring SDK with two surfaces: 4. **Type-driven** -- Discriminated unions, branded types, exhaustive matching via ts-pattern 5. **Zod at boundaries** -- Runtime validation for configs, user input, and external data 6. **Vercel AI SDK foundation** -- Built on `ai` package for model interaction, tool calling, and streaming -7. 
**Multi-provider support** -- Model resolution via `createModelResolver()` with OpenRouter as default fallback +7. **Multi-provider support** -- Model resolution via `createProviderRegistry()` with configurable provider mappings 8. **Composition over inheritance** -- Small, focused interfaces composed together ## Package Conventions diff --git a/contributing/guides/getting-started.md b/contributing/guides/getting-started.md index f6b45dd..ab30e35 100644 --- a/contributing/guides/getting-started.md +++ b/contributing/guides/getting-started.md @@ -46,7 +46,7 @@ Read the project docs in this order: 2. [`contributing/concepts/architecture.md`](../concepts/architecture.md) -- package ecosystem, design principles, data flow 3. [`contributing/concepts/tech-stack.md`](../concepts/tech-stack.md) -- tools, libraries, and design rationale 4. Relevant standards in `contributing/standards/` as needed -5. Package docs: [`@funkai/agents`](/agents/) and [`@funkai/prompts`](/prompts/) +5. Package docs: [`@funkai/agents`](/concepts/agents) and [`@funkai/prompts`](/concepts/prompts) ### 6. Set up Claude Code (optional) diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 0000000..e89fd5f --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,78 @@ +# Architecture + +## Package dependency graph + +```mermaid +graph TD + agents["@funkai/agents"] + models["@funkai/models"] + prompts["@funkai/prompts"] + cli["@funkai/cli"] + ai["ai (Vercel AI SDK)"] + + agents --> models + agents --> ai + cli --> prompts + models --> ai +``` + +- **`@funkai/agents`** depends on `@funkai/models` (for `ProviderRegistry` type) and `ai` (Vercel AI SDK). +- **`@funkai/models`** depends on `ai` for the `LanguageModel` type. Standalone otherwise. +- **`@funkai/prompts`** is fully standalone -- no dependency on other funkai packages. +- **`@funkai/cli`** depends on `@funkai/prompts` for prompt generation and linting. 
+ +## Agent + +`agent()` wraps the AI SDK's `generateText` and `streamText` with: + +- **Typed input** -- Optional Zod schema + prompt template. When provided, `.generate()` accepts typed input and validates it before calling the model. In simple mode, raw strings or message arrays pass through directly. +- **Tools** -- A record of `tool()` instances exposed to the model for function calling. +- **Subagents** -- Other agents passed via the `agents` config, automatically wrapped as callable tools with abort signal propagation. +- **Hooks** -- `onStart`, `onFinish`, `onError`, `onStepFinish` for observability. Per-call overrides merge with base hooks. +- **Result** -- Every method returns `Result`. Success fields are flat on the object. Errors carry `code`, `message`, and optional `cause`. + +The tool loop runs up to `maxSteps` iterations (default 20), where each iteration may invoke tools or subagents before producing a final response. + +## FlowAgent + +`flowAgent()` provides code-driven orchestration. The handler receives `{ input, $, log }`: + +- **`input`** -- Validated against the input Zod schema. +- **`$`** (StepBuilder) -- Traced operations: `$.step()`, `$.agent()`, `$.map()`, `$.each()`, `$.reduce()`, `$.while()`, `$.all()`, `$.race()`. Each call becomes an entry in the execution trace. +- **`log`** -- Scoped logger with contextual bindings. + +The handler returns the output value, validated against the optional output Zod schema. Each `$` operation is modeled as a synthetic tool call in the message history, making flow agents compatible with the same `GenerateResult` and `StreamResult` types as regular agents. + +FlowAgent results include additional fields: `trace` (array of step entries) and `duration` (wall-clock milliseconds). 
+ +## The Runnable interface + +Both `Agent` and `FlowAgent` satisfy the `Runnable` interface: + +```typescript +interface Runnable { + generate(input: TInput, config?): Promise>; + stream(input: TInput, config?): Promise; fullStream }>>; + fn(): (input: TInput, config?) => Promise>; +} +``` + +This enables nesting: a `FlowAgent` can call any `Agent` or `FlowAgent` via `$.agent()`. An `Agent` can delegate to subagents via the `agents` config. The framework uses a symbol-keyed metadata property (`RUNNABLE_META`) to extract the name and input schema when wrapping runnables as tools. + +## @funkai/models + +Provides three capabilities: + +- **Model catalog** -- `model(id)` and `models()` for querying model definitions (capabilities, modalities, pricing) sourced from OpenRouter. The catalog is generated at build time via `pnpm --filter=@funkai/models generate:models`. +- **Provider registry** -- `createProviderRegistry()` maps provider names to AI SDK provider instances, enabling string-based model resolution (e.g., `'openai/gpt-4.1'`). +- **Cost calculation** -- `calculateCost()` computes dollar costs from token usage and model pricing data. + +## @funkai/prompts + +Build-time prompt templating: + +- Prompts are defined as `.prompt` files with YAML frontmatter (metadata, Zod schema reference) and LiquidJS template bodies. +- `createPromptRegistry()` loads compiled prompt modules and provides type-safe rendering at runtime. +- The `@funkai/cli` package provides commands for creating, generating, and linting prompt files. + +Prompts are independent of the agents package -- they can be used standalone or integrated into agent prompt functions. diff --git a/docs/concepts/agents.md b/docs/concepts/agents.md new file mode 100644 index 0000000..01340e8 --- /dev/null +++ b/docs/concepts/agents.md @@ -0,0 +1,134 @@ +# Agents + +`agent()` creates a single LLM boundary — one model, an optional tool loop, typed I/O, and `Result`-based returns. 
It wraps the AI SDK's `generateText`/`streamText` without hiding them. + +Every call to `.generate()` or `.stream()` returns `Result`. Check `.ok` before accessing values — there is no try/catch. + +## Two modes + +**Simple** — no input schema. Pass a prompt object at call time. + +```typescript +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; + +const assistant = agent({ + name: "assistant", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", +}); + +const result = await assistant.generate({ prompt: "What is TypeScript?" }); + +if (!result.ok) { + console.error(result.error.message); + process.exit(1); +} + +console.log(result.output); // string +``` + +**Typed** — declare `input` (Zod schema) and `prompt` (render function) together. Call time is fully type-checked. + +```typescript +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const summarizer = agent({ + name: "summarizer", + model: openai("gpt-4.1"), + input: z.object({ text: z.string(), maxWords: z.number() }), + prompt: ({ input }) => `Summarize the following in ${input.maxWords} words:\n\n${input.text}`, +}); + +const result = await summarizer.generate({ input: { text: "Long article...", maxWords: 50 } }); + +if (result.ok) { + console.log(result.output); +} +``` + +`input` and `prompt` must be provided together — one without the other is a type error. + +## Streaming + +Use `.stream()` instead of `.generate()`. Consume `result.fullStream` for incremental output; `result.output` resolves after the stream ends. + +```typescript +const result = await assistant.stream({ prompt: "Tell me a story." }); + +if (result.ok) { + for await (const part of result.fullStream) { + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } + } + const final = await result.output; +} +``` + +## Tools and subagents + +Pass a `tools` record for function calling. 
Pass an `agents` record to expose other agents as callable tools — abort signals propagate automatically. + +```typescript +const analyst = agent({ + name: "analyst", + model: openai("gpt-4.1"), + system: "You analyze data. Delegate searches to the searcher.", + tools: { calculator }, + agents: { searcher: searchAgent }, +}); +``` + +## Output strategies + +The `output` field controls the return type of `result.output`: + +| Strategy | Result type | Description | +| ---------------------------- | ----------- | -------------------------------------------------------- | +| `Output.text()` | `string` | Plain text (default) | +| `Output.object({ schema })` | `T` | Validated structured object matching the Zod schema | +| `Output.array({ element })` | `T[]` | Validated array of objects matching the element schema | +| `Output.choice({ options })` | `string` | One of the provided string options (enum/classification) | +| `z.object({ ... })` | `T` | Shorthand — auto-wrapped as `Output.object()` | +| `z.array(z.object({ ... }))` | `T[]` | Shorthand — auto-wrapped as `Output.array()` | + +```typescript +import { agent, Output } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const classifier = agent({ + name: "classifier", + model: openai("gpt-4.1"), + output: Output.object({ + schema: z.object({ + category: z.enum(["bug", "feature", "question"]), + confidence: z.number(), + }), + }), +}); + +const result = await classifier.generate({ prompt: "App crashes on login" }); +if (result.ok) { + console.log(result.output.category); // "bug" + console.log(result.output.confidence); // 0.95 +} +``` + +## Hooks + +`onStart`, `onFinish`, `onError`, and `onStepFinish` fire at lifecycle points. Set them on the config or pass them per-call as overrides. 
+ +## When to use `agent()` vs `flowAgent()` + +Use `agent()` when a single model call (with optional tool iterations) is sufficient — question answering, classification, summarization, or single-turn tool use. Use `flowAgent()` when you need to coordinate multiple agents, run parallel work, or implement custom control flow with traced steps. See [Flow Agents](/concepts/flow-agents) for details. + +## References + +- [`agent()` reference](/reference/agents/agent) +- [Streaming guide](/guides/streaming) +- [Tools](/concepts/tools) +- [Flow Agents](/concepts/flow-agents) diff --git a/docs/concepts/flow-agents.md b/docs/concepts/flow-agents.md new file mode 100644 index 0000000..4ffcca6 --- /dev/null +++ b/docs/concepts/flow-agents.md @@ -0,0 +1,99 @@ +# Flow Agents + +`flowAgent()` creates a multi-step agent whose logic is plain imperative TypeScript. There are no step arrays or definition objects — you write a handler function and use `$` for tracked operations. + +Flow agents always require a typed `input` Zod schema, validated on entry. The `output` schema is optional — when provided, the handler's return value is validated against it before being returned to the caller. When omitted, the handler returns `void` and the collected text from sub-agent responses becomes a `string` output. 
+ +## Basic example + +```typescript +import { agent, flowAgent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const writer = agent({ + name: "writer", + model: openai("gpt-4.1"), + input: z.object({ topic: z.string() }), + prompt: ({ input }) => `Write a short paragraph about: ${input.topic}`, +}); + +const pipeline = flowAgent( + { + name: "write-and-review", + input: z.object({ topic: z.string() }), + output: z.object({ text: z.string() }), + }, + async ({ input, $ }) => { + // $.step — tracked unit of synchronous or async work + const slug = await $.step({ + id: "slugify", + execute: async () => input.topic.toLowerCase().replace(/\s+/g, "-"), + }); + + // $.agent — tracked agent call, returns StepResult + const draft = await $.agent({ + id: "write-draft", + agent: writer, + input: { topic: input.topic }, + }); + + if (!draft.ok) { + return { text: "Generation failed." }; + } + + return { text: draft.value.output }; + }, +); + +const result = await pipeline.generate({ input: { topic: "pattern matching" } }); + +if (result.ok) { + console.log(result.output.text); + console.log("Duration:", result.duration, "ms"); + console.log("Trace:", result.trace); // full execution tree +} +``` + +## The $ step builder + +`$` provides operations that are tracked in the execution trace. All return `Promise>` — check `.ok` before using `.value`. + +| Operation | Description | +| ---------- | ------------------------------------------------------ | +| `$.step` | Single unit of work | +| `$.agent` | Call an `agent()` as a tracked step | +| `$.map` | Map over an array with optional concurrency | +| `$.each` | Iterate an array sequentially | +| `$.reduce` | Reduce an array to a single value | +| `$.while` | Loop while a condition holds | +| `$.all` | Run multiple operations in parallel (all must succeed) | +| `$.race` | Run multiple operations in parallel (first one wins) | + +State lives in plain variables — use closures. 
There is no shared state object. + +## Trace and usage + +`result.trace` is a readonly tree of every `$` operation: its id, type, duration, and nested children. `result.usage` aggregates token counts from all `$.agent` calls in the flow. + +## Streaming step progress + +`.stream()` emits `StepEvent` objects (`step:start`, `step:finish`, `step:error`, `flow:finish`) as each `$` operation runs. Use this to push real-time progress to a UI. + +```typescript +const result = await pipeline.stream({ input: { topic: "closures" } }); + +if (result.ok) { + for await (const event of result.fullStream) { + if (event.type === "step:finish") { + console.log(event.step.id, "done in", event.duration, "ms"); + } + } +} +``` + +## References + +- [`flowAgent()` reference](/reference/agents/flow-agent) +- [Multi-Agent Orchestration guide](/guides/multi-agent) +- [Agents](/concepts/agents) diff --git a/docs/concepts/models.md b/docs/concepts/models.md new file mode 100644 index 0000000..9f7c0fb --- /dev/null +++ b/docs/concepts/models.md @@ -0,0 +1,84 @@ +# Models + +`@funkai/models` covers three domains: a catalog of 300+ models sourced from [models.dev](https://models.dev), provider resolution that maps model IDs to AI SDK `LanguageModel` instances, and cost calculation from token usage. + +## Model catalog + +`model(id)` looks up a single model definition. `models(filter?)` returns the full catalog or a filtered subset. 
+ +```typescript +import { model, models } from "@funkai/models"; + +// Single lookup — returns ModelDefinition | null +const gpt41 = model("gpt-4.1"); +if (gpt41) { + console.log(gpt41.name); + console.log(gpt41.contextWindow); + console.log(gpt41.capabilities.toolCall); // boolean + console.log(gpt41.pricing.input); // cost per input token in USD +} + +// Filtered list +const reasoningModels = models((m) => m.capabilities.reasoning); +const openaiModels = models((m) => m.provider === "openai"); +``` + +`ModelDefinition` includes: `id`, `name`, `provider`, `family`, `contextWindow`, `maxOutput`, `pricing`, `capabilities`, and `modalities`. + +## Provider resolution + +`createProviderRegistry()` maps provider prefixes to AI SDK provider factories. Call the returned registry with a `"provider/model"` ID to get a `LanguageModel` instance. + +```typescript +import { createProviderRegistry } from "@funkai/models"; +import { createOpenAI } from "@ai-sdk/openai"; +import { createAnthropic } from "@ai-sdk/anthropic"; + +const registry = createProviderRegistry({ + providers: { + openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), + anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }), + }, +}); + +// Returns a LanguageModel — pass directly to agent() +const lm = registry("openai/gpt-4.1"); +``` + +The prefix before the first `/` selects the provider factory. Model IDs without a `/` throw — always use the full `"provider/model"` format. + +## Cost calculation + +`calculateCost()` takes a `TokenUsage` object and a `ModelPricing` object (from a `ModelDefinition`) and returns a `UsageCost` breakdown. 
+ +```typescript +import { model, calculateCost } from "@funkai/models"; + +const m = model("gpt-4.1"); + +if (m) { + const cost = calculateCost( + { + inputTokens: 1_000, + outputTokens: 500, + totalTokens: 1_500, + cacheReadTokens: 0, + cacheWriteTokens: 0, + reasoningTokens: 0, + }, + m.pricing, + ); + + console.log(`Total: $${cost.total.toFixed(6)}`); + // cost.input, cost.output, cost.cacheRead, cost.cacheWrite are also available +} +``` + +Token usage from agent results (`result.usage`) can be passed directly to `calculateCost()`. + +## References + +- [`model()` & `models()` reference](/reference/models/model) +- [`createProviderRegistry()` reference](/reference/models/provider-registry) +- [`calculateCost()` reference](/reference/models/calculate-cost) +- [Cost Tracking guide](/guides/cost-tracking) diff --git a/docs/concepts/prompts.md b/docs/concepts/prompts.md new file mode 100644 index 0000000..28c7a39 --- /dev/null +++ b/docs/concepts/prompts.md @@ -0,0 +1,79 @@ +# Prompts + +The prompts system has two surfaces that work together: + +- **CLI** (`@funkai/cli`) — build-time codegen. Reads `.prompt` files, validates them, and emits typed TypeScript modules. +- **Library** (`@funkai/prompts`) — runtime rendering. The generated code uses the library to render LiquidJS templates with Zod-validated variables. + +## The .prompt file format + +A `.prompt` file is a LiquidJS template with a YAML frontmatter block. The frontmatter declares the prompt's name, an optional group for nesting, and a variable schema. + +``` +--- +name: code-reviewer +group: agents +schema: + language: + type: string + description: Programming language being reviewed + diff: + type: string + description: The code diff to review + strict: + type: string + required: false +--- + +You are a {{ language }} code reviewer. 
+ +Review the following diff: + +{{ diff }} +{% if strict %}Apply strict style enforcement.{% endif %} +``` + +The CLI compiles this into a TypeScript module with a typed `render(variables)` function and a Zod `schema`. + +## Generating code + +Run the CLI to compile `.prompt` files into TypeScript: + +```bash +npx @funkai/cli prompts generate --includes "src/**/*.prompt" --out .prompts/client +``` + +Lint prompts for schema/template alignment without generating: + +```bash +npx @funkai/cli prompts lint --includes "src/**/*.prompt" +``` + +## Using generated prompts + +The CLI emits an `index.ts` that exports a `prompts` registry. Import it via the `~prompts` tsconfig alias (configured during setup): + +```typescript +import { prompts } from "~prompts"; + +// Flat prompt (no group) +const text = prompts.greeting.render({ name: "Alice" }); + +// Grouped prompt (group: agents) +const review = prompts.agents.codeReviewer.render({ + language: "TypeScript", + diff: "- const x = 1\n+ const x: number = 1", +}); +``` + +`render()` validates variables against the Zod schema before rendering. A missing required variable throws at call time, not at model invocation. + +## Runtime-only usage + +If you want to use the library without codegen, create a registry manually with `createPromptRegistry()` from `@funkai/prompts`. This is uncommon — the CLI workflow is the standard path. + +## References + +- [`createPrompt()` reference](/reference/prompts/create-prompt) +- [`createPromptRegistry()` reference](/reference/prompts/create-prompt-registry) +- [Prompts CLI reference](/reference/prompts/cli) diff --git a/docs/concepts/tools.md b/docs/concepts/tools.md new file mode 100644 index 0000000..3d8e420 --- /dev/null +++ b/docs/concepts/tools.md @@ -0,0 +1,91 @@ +# Tools + +`tool()` creates a typed function-calling tool. The model sees the description and input schema; when it calls the tool, the input is validated against the Zod schema before `execute` receives it. 
+ +Tool names come from the object key in the agent's `tools` record — there is no `name` field on the config. + +## Creating a tool + +```typescript +import { tool } from "@funkai/agents"; +import { z } from "zod"; + +const fetchPage = tool({ + description: "Fetch the HTML contents of a web page by URL.", + inputSchema: z.object({ + url: z.string().url(), + }), + execute: async ({ url }) => { + const res = await fetch(url); + return { status: res.status, body: await res.text() }; + }, +}); +``` + +## Using a tool with an agent + +```typescript +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; + +const researcher = agent({ + name: "researcher", + model: openai("gpt-4.1"), + system: "You research topics by fetching web pages.", + tools: { fetchPage }, // key "fetchPage" becomes the tool name +}); + +const result = await researcher.generate({ prompt: "Summarize https://example.com" }); + +if (result.ok) { + console.log(result.output); +} +``` + +## Output validation + +Add `outputSchema` to validate what `execute` returns: + +```typescript +const calculator = tool({ + description: "Evaluate a math expression and return the numeric result.", + inputSchema: z.object({ expression: z.string() }), + outputSchema: z.object({ result: z.number() }), + execute: async ({ expression }) => ({ result: evaluate(expression) }), +}); +``` + +## Input examples + +Provide `inputExamples` to guide the model toward correct usage: + +```typescript +const search = tool({ + description: "Search the codebase for a pattern.", + inputSchema: z.object({ + query: z.string(), + fileType: z.string().optional(), + }), + inputExamples: [ + { input: { query: "export const agent", fileType: "ts" } }, + { input: { query: "TODO" } }, + ], + execute: async ({ query, fileType }) => searchCodebase(query, fileType), +}); +``` + +## Config fields + +| Field | Required | Description | +| --------------- | -------- | ------------------------------------------------- | +| 
`description` | Yes | What the tool does (shown to the model) | +| `inputSchema` | Yes | Zod schema — validated before `execute` is called | +| `execute` | Yes | Async function receiving validated input | +| `title` | No | Display title for UIs and logs | +| `outputSchema` | No | Zod schema for validating `execute`'s return | +| `inputExamples` | No | Example inputs to guide the model | + +## References + +- [`tool()` reference](/reference/agents/tool) +- [Agents](/concepts/agents) diff --git a/docs/introduction.md b/docs/introduction.md new file mode 100644 index 0000000..218a8b4 --- /dev/null +++ b/docs/introduction.md @@ -0,0 +1,80 @@ +# Introduction + +funkai is a composable, functional TypeScript microframework for AI agent orchestration. It is built on the [Vercel AI SDK](https://ai-sdk.dev) -- not a replacement, but a thin layer that adds typed agents, multi-step workflows, and structured error handling on top of `generateText`/`streamText`. + +## The problem + +The AI SDK gives you powerful primitives like `generateText` and `streamText`. But when you start building real applications, you need more: typed agents with validated I/O, multi-step orchestration with observable traces, consistent error handling that does not rely on try/catch, and a model catalog for cost tracking. funkai adds all of this without introducing classes or hidden state. + +## Two core primitives + +funkai provides two agent primitives that share the same `Runnable` interface: + +- **`agent()`** -- A single LLM boundary with a tool loop. Wraps `generateText`/`streamText` with typed input, tools, subagents, hooks, and `Result`-based error handling. Use this when a single model call (with optional tool iterations) is sufficient. + +- **`flowAgent()`** -- Multi-step, code-driven orchestration. Your handler function receives `{ input, $, log }` where `$` is the StepBuilder providing traced operations like `$.step()`, `$.agent()`, `$.map()`, and `$.reduce()`. 
Use this when you need to coordinate multiple agents, run parallel work, or implement custom control flow. + +Both return `Result` from every public method -- a discriminated union you pattern-match on `ok` instead of catching exceptions. + +## Packages + +| Package | Name | Description | +| ----------------- | ------- | ----------------------------------------------------------------------------- | +| `@funkai/agents` | Agents | Agent orchestration -- `agent()`, `flowAgent()`, `tool()`, `Result` utilities | +| `@funkai/models` | Models | Model catalog, provider registry, and cost calculation | +| `@funkai/prompts` | Prompts | Build-time prompt templating with LiquidJS and Zod validation | +| `@funkai/cli` | CLI | Command-line tooling for prompt generation, linting, and setup | + +## Design at a glance + +```typescript +import { agent, flowAgent, tool } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +// Single LLM boundary +const writer = agent({ + name: "writer", + model: openai("gpt-4.1"), + system: "You write concise technical docs.", +}); + +// Multi-step orchestration +const pipeline = flowAgent( + { + name: "pipeline", + input: z.object({ topics: z.array(z.string()) }), + output: z.object({ docs: z.array(z.string()) }), + }, + async ({ input, $ }) => { + const docs = await $.map({ + id: "write-docs", + input: input.topics, + execute: async ({ item, $ }) => { + const result = await $.agent({ id: "write", agent: writer, input: item }); + if (result.ok) { + return result.value.output; + } + return ""; + }, + concurrency: 3, + }); + if (docs.ok) { + return { docs: docs.value }; + } + return { docs: [] }; + }, +); + +// Both satisfy Runnable -- same .generate(), .stream(), .fn() +const result = await pipeline.generate({ topics: ["TypeScript", "Zod"] }); +if (result.ok) { + console.log(result.output); +} +``` + +## Next steps + +- [Quick Start](/quick-start) -- Install and build your first agent in minutes. 
+- [Agents](/concepts/agents) -- Understand the core `agent()` primitive. +- [Flow Agents](/concepts/flow-agents) -- Multi-step orchestration with `flowAgent()`. diff --git a/docs/principles.md b/docs/principles.md new file mode 100644 index 0000000..5a70b0c --- /dev/null +++ b/docs/principles.md @@ -0,0 +1,145 @@ +# Principles + +## Functions all the way down + +`agent()`, `flowAgent()`, and `tool()` are factory functions that return plain objects. There are no classes, no `new`, no `this`. The returned objects expose methods like `.generate()` and `.stream()`, but they are closures over configuration -- not class instances. + +```typescript +const myAgent = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are helpful.", +}); + +// myAgent is a plain object: { generate, stream, fn } +const generate = myAgent.fn(); +const result = await generate("Hello"); +``` + +## Result, never throw + +Every public method returns `Result` -- a discriminated union. Pattern-match on `ok` instead of wrapping calls in try/catch. Success fields are flat on the object alongside `ok: true`. Failure carries a structured `error` with `code`, `message`, and optional `cause`. + +```typescript +const result = await myAgent.generate({ prompt: "Hello" }); + +if (!result.ok) { + // result.error.code: 'VALIDATION_ERROR' | 'AGENT_ERROR' | ... + console.error(result.error.code, result.error.message); + return; +} + +// Success -- fields are flat +console.log(result.output); +console.log(result.usage.totalTokens); +``` + +## Closures are state + +Flow agent state is just variables in your handler function. There is no state machine, no reducer, no context object to thread through. Your handler is an async function -- use `let`, loops, and conditionals as you normally would. 
+ +```typescript +const counter = flowAgent( + { + name: "retry-loop", + input: z.object({ prompt: z.string() }), + output: z.object({ answer: z.string() }), + }, + async ({ input, $ }) => { + let attempts = 0; + let answer = ""; + + while (attempts < 3 && answer.length === 0) { + const result = await $.agent({ + id: `attempt-${attempts}`, + agent: writer, + input: input.prompt, + }); + if (result.ok) { + answer = result.value.output; + } + attempts += 1; + } + + return { answer }; + }, +); +``` + +## Composition over configuration + +Small functions composed together, not large option bags. An agent with tools is just an agent config with a `tools` record. A flow agent that calls agents is just a handler that calls `$.agent()`. Subagents are agents passed in the `agents` config -- automatically wrapped as tools. + +```typescript +const researcher = agent({ name: "researcher", model, system: "..." }); +const writer = agent({ name: "writer", model, system: "..." }); + +// Subagents as tools -- the orchestrator delegates via LLM decisions +const orchestrator = agent({ + name: "orchestrator", + model, + system: "Coordinate research and writing.", + agents: { researcher, writer }, +}); +``` + +## Zero hidden state + +There are no singletons, no module-level registries, no global configuration. Every agent carries its own config. Loggers are passed explicitly. Provider registries are created and injected, not imported from a shared module. + +```typescript +// Each agent is self-contained +const a = agent({ name: "a", model: openai("gpt-4.1"), system: "..." }); +const b = agent({ name: "b", model: openai("gpt-4.1-mini"), system: "..." }); +// No shared state between a and b +``` + +## `$` is optional sugar + +The StepBuilder (`$`) provides observability -- every `$.step()`, `$.agent()`, `$.map()` call becomes an entry in the execution trace. But it is not required. You can call agents directly, use plain loops, or mix traced and untraced operations. 
The trace just will not include untraced work. + +```typescript +const pipeline = flowAgent( + { + name: "mixed", + input: z.object({ text: z.string() }), + output: z.object({ result: z.string() }), + }, + async ({ input, $ }) => { + // Traced -- appears in result.trace + const analysis = await $.agent({ id: "analyze", agent: analyzer, input: input.text }); + + // Untraced -- plain function call, not in trace + let analysisText; + if (analysis.ok) { + analysisText = analysis.value.output; + } else { + analysisText = input.text; + } + const cleaned = cleanText(analysisText); + + // Traced again + const final = await $.step({ id: "format", execute: () => formatOutput(cleaned) }); + + if (final.ok) { + return { result: final.value }; + } + return { result: cleaned }; + }, +); +``` + +## Agent vs FlowAgent + +Both satisfy the `Runnable` interface -- same `.generate()`, `.stream()`, `.fn()`. The difference is internal: + +| | `agent()` | `flowAgent()` | +| ------------------------- | -------------------------------------------------- | -------------------------------------------------------- | +| **What drives execution** | LLM tool loop | Your handler code | +| **When to use** | Single model call, optionally with tools/subagents | Multi-step orchestration, conditional logic, parallelism | +| **State** | Managed by AI SDK | Variables in your handler closure | +| **Trace** | Tool-loop steps via hooks | `$` step builder entries | +| **Model required** | Yes | No (sub-agents provide their own models) | +| **Nesting** | Can delegate to subagents | Can call `$.agent()` with any `Agent` or `FlowAgent` | + +Use `agent()` when a single LLM boundary is enough. Use `flowAgent()` when you need to coordinate multiple agents, implement retry logic, run parallel work, or apply custom control flow around LLM calls. 
diff --git a/docs/quick-start.md b/docs/quick-start.md new file mode 100644 index 0000000..7c719d5 --- /dev/null +++ b/docs/quick-start.md @@ -0,0 +1,144 @@ +# Quick Start + +## Installation + +```bash +pnpm add @funkai/agents @ai-sdk/openai +``` + +Set your API key: + +```bash +export OPENAI_API_KEY=sk-... +``` + +## 1. Create a simple agent + +```typescript +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; + +const assistant = agent({ + name: "assistant", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", +}); + +const result = await assistant.generate({ prompt: "What is TypeScript?" }); + +if (!result.ok) { + console.error(result.error.code, result.error.message); + process.exit(1); +} + +console.log(result.output); +// result.usage.totalTokens, result.messages, result.finishReason +``` + +## 2. Add tools + +```typescript +import { agent, tool } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const fetchPage = tool({ + description: "Fetch a web page by URL", + inputSchema: z.object({ url: z.string().url() }), + execute: async ({ url }) => { + const res = await fetch(url); + return { status: res.status, body: await res.text() }; + }, +}); + +const researcher = agent({ + name: "researcher", + model: openai("gpt-4.1"), + system: "You research topics by fetching web pages.", + tools: { fetchPage }, +}); + +const result = await researcher.generate({ prompt: "Summarize https://example.com" }); + +if (result.ok) { + console.log(result.output); +} +``` + +## 3. 
Multi-step orchestration with flowAgent + +```typescript +import { agent, flowAgent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const writer = agent({ + name: "writer", + model: openai("gpt-4.1"), + input: z.object({ topic: z.string() }), + prompt: ({ input }) => `Write a short explanation of: ${input.topic}`, +}); + +const reviewer = agent({ + name: "reviewer", + model: openai("gpt-4.1"), + input: z.object({ draft: z.string() }), + prompt: ({ input }) => `Review and improve this text:\n\n${input.draft}`, +}); + +const pipeline = flowAgent( + { + name: "write-and-review", + input: z.object({ topic: z.string() }), + output: z.object({ final: z.string() }), + }, + async ({ input, $ }) => { + // Step 1: Write a draft + const draft = await $.agent({ + id: "write-draft", + agent: writer, + input: { topic: input.topic }, + }); + + if (!draft.ok) { + return { final: "Failed to generate draft." }; + } + + // Step 2: Review and improve + const reviewed = await $.agent({ + id: "review-draft", + agent: reviewer, + input: { draft: draft.value.output }, + }); + + if (reviewed.ok) { + return { final: reviewed.value.output }; + } + return { final: draft.value.output }; + }, +); + +const result = await pipeline.generate({ topic: "pattern matching in TypeScript" }); + +if (result.ok) { + console.log(result.output.final); + console.log("Trace:", result.trace); + console.log("Duration:", result.duration, "ms"); +} +``` + +## Streaming + +Both `agent()` and `flowAgent()` support streaming via `.stream()`: + +```typescript +const result = await assistant.stream({ prompt: "Explain closures in JavaScript" }); + +if (result.ok) { + for await (const part of result.fullStream) { + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } + } +} +``` diff --git a/docs/reference/agent.md b/docs/reference/agent.md new file mode 100644 index 0000000..7f6cee6 --- /dev/null +++ b/docs/reference/agent.md @@ -0,0 +1,206 @@ +# 
agent() + +Create a single-boundary LLM agent with typed I/O, tools, subagents, and `Result`-based error handling. This is the core primitive for wrapping AI SDK `generateText`/`streamText` calls. + +## Function Signature + +```typescript +function agent( + config: AgentConfig, +): Agent; +``` + +## AgentConfig + +| Field | Type | Required | Default | Description | +| -------------- | -------------------------------------------------------------------------------------------------------- | -------- | --------------- | ------------------------------------------------------------------------------ | +| `name` | `string` | Yes | — | Unique agent name used in logs and trace entries | +| `model` | `Resolver` | Yes | — | AI SDK `LanguageModel` instance or resolver function | +| `input` | `ZodType` | No | — | Zod schema for typed input. When provided, `.generate()` accepts `TInput` | +| `prompt` | `(params: { input: TInput }) => string \| Message[] \| Promise` | No | — | Maps typed input to the prompt sent to the model. 
Required when `input` is set | +| `system` | `Resolver` | No | — | Static or dynamic system prompt | +| `tools` | `Resolver` | No | — | Record of `Tool` instances available to the agent | +| `agents` | `Resolver` | No | — | Record of subagents auto-wrapped as callable tools | +| `maxSteps` | `Resolver` | No | `20` | Max tool-loop iterations | +| `output` | `OutputParam` | No | `Output.text()` | Output type strategy | +| `logger` | `Resolver` | No | pino default | Pino-compatible logger | +| `onStart` | `(event: { input: TInput }) => void \| Promise` | No | — | Fires when the agent starts | +| `onFinish` | `(event: { input: TInput; result: GenerateResult; duration: number }) => void \| Promise` | No | — | Fires on success | +| `onError` | `(event: { input: TInput; error: Error }) => void \| Promise` | No | — | Fires on error | +| `onStepFinish` | `(event: StepFinishEvent) => void \| Promise` | No | — | Fires after each tool-loop step | + +### Resolver type + +```typescript +type Resolver = T | ((params: { input: TInput }) => T | Promise); +``` + +Static values or functions resolved at `.generate()` / `.stream()` time with the validated input. 
+ +## Two Modes + +| Mode | `input` schema | `prompt` fn | `.generate()` accepts | +| ------ | ----------------- | ----------- | ------------------------------------------------- | +| Simple | Omitted | Omitted | `{ prompt: string }` or `{ messages: Message[] }` | +| Typed | `ZodType` | Required | `{ input: TInput }` | + +## Agent Interface + +```typescript +interface Agent { + readonly model: Resolver; + + generate( + params: GenerateParams, + ): Promise>>; + stream( + params: GenerateParams, + ): Promise>>; + fn(): ( + params: GenerateParams, + ) => Promise>>; +} +``` + +| Method | Returns | Description | +| ------------------- | ------------------------------------------ | ----------------------------------------- | +| `.generate(params)` | `Promise>>` | Run to completion; returns wrapped result | +| `.stream(params)` | `Promise>>` | Run with streaming; returns stream handle | +| `.fn()` | Function with `.generate()` signature | Returns a plain callable function | + +## GenerateParams + +Input is exactly one of `prompt`, `messages`, or `input`. All other fields are optional per-call overrides. 
+ +```typescript +type GenerateParams = BaseGenerateParams & + AgentGenerateOverrides & + ( + | { prompt: string; messages?: undefined; input?: undefined } + | { messages: Message[]; prompt?: undefined; input?: undefined } + | { input: TInput; prompt?: undefined; messages?: undefined } + ); +``` + +| Field | Type | Description | +| -------------- | -------------------------------------------------------------------------------------------------------- | ------------------------------- | +| `prompt` | `string` | Raw string prompt (simple mode) | +| `messages` | `Message[]` | Message array (simple mode) | +| `input` | `TInput` | Typed input (typed mode) | +| `model` | `Model` | Override model for this call | +| `system` | `string \| ((params: { input: unknown }) => string)` | Override system prompt | +| `tools` | `Partial & Record` | Override/extend tools | +| `agents` | `Partial & Record` | Override/extend subagents | +| `maxSteps` | `number` | Override max tool-loop steps | +| `output` | `OutputParam` | Override output strategy | +| `signal` | `AbortSignal` | Cancellation signal | +| `timeout` | `number` | Auto-abort after N milliseconds | +| `logger` | `Logger` | Override logger for this call | +| `onStart` | `(event: { input: TInput }) => void \| Promise` | Per-call start hook | +| `onFinish` | `(event: { input: TInput; result: GenerateResult; duration: number }) => void \| Promise` | Per-call finish hook | +| `onError` | `(event: { input: TInput; error: Error }) => void \| Promise` | Per-call error hook | +| `onStepFinish` | `(event: StepFinishEvent) => void \| Promise` | Per-call step-finish hook | + +Per-call hooks merge with base config hooks — base fires first, then call-level. 
+ +## GenerateResult + +```typescript +interface GenerateResult { + output: TOutput; + messages: Message[]; + usage: TokenUsage; + finishReason: string; +} +``` + +| Field | Type | Description | +| -------------- | ------------ | ------------------------------------------------------------------------------ | +| `output` | `TOutput` | Generation output; type depends on `OutputParam` | +| `messages` | `Message[]` | Full message history including tool calls | +| `usage` | `TokenUsage` | Aggregated token counts across all tool-loop steps | +| `finishReason` | `string` | `"stop"`, `"length"`, `"content-filter"`, `"tool-calls"`, `"error"`, `"other"` | + +## StreamResult + +```typescript +interface StreamResult { + output: Promise; + messages: Promise; + usage: Promise; + finishReason: Promise; + fullStream: AsyncIterableStream; + toTextStreamResponse(init?: ResponseInit): Response; + toUIMessageStreamResponse(options?: ResponseInit & UIMessageStreamOptions): Response; +} +``` + +| Field | Type | Description | +| ----------------------------- | --------------------------------- | ------------------------------------------- | +| `output` | `Promise` | Resolves after stream completes | +| `messages` | `Promise` | Resolves after stream completes | +| `usage` | `Promise` | Resolves after stream completes | +| `finishReason` | `Promise` | Resolves after stream completes | +| `fullStream` | `AsyncIterableStream` | Typed stream events; use `for await...of` | +| `toTextStreamResponse()` | `Response` | Plain-text streaming HTTP response | +| `toUIMessageStreamResponse()` | `Response` | Vercel AI SDK `useChat`-compatible response | + +`StreamPart` is `TextStreamPart` — discriminated on `part.type`: `"text-delta"`, `"tool-call"`, `"tool-result"`, `"finish"`, `"error"`, etc. 
+ +## OutputParam + +```typescript +type OutputParam = OutputSpec | ZodType; +``` + +| Value | Result type | Description | +| ---------------------------- | ----------- | --------------------------------- | +| `Output.text()` | `string` | Plain text (default) | +| `Output.object({ schema })` | `T` | Validated structured object | +| `Output.array({ element })` | `T[]` | Validated array | +| `Output.choice({ options })` | `string` | Enum/classification | +| `z.object({ ... })` | `T` | Auto-wrapped as `Output.object()` | +| `z.array(z.object({ ... }))` | `T[]` | Auto-wrapped as `Output.array()` | + +## AgentOverrides + +```typescript +type AgentOverrides = + | Partial> + | ((config: AgentConfig<...>) => Partial>) +``` + +Used with `evolve()`. Accepts either a partial config object or a mapper function that receives the current config and returns partial overrides. Scalars replace; `tools` and `agents` records are shallow-merged. + +## Result Pattern + +Every public method returns `Result` — a discriminated union. Success fields are flat on the object. 
+ +```typescript +type Result = (T & { ok: true }) | { ok: false; error: ResultError }; + +interface ResultError { + code: string; // machine-readable error code + message: string; // human-readable description + cause?: Error; // original thrown error +} +``` + +Check `result.ok` before accessing success fields: + +```typescript +const result = await myAgent.generate({ prompt: "Hello" }); +if (!result.ok) { + console.error(result.error.code, result.error.message); + return; +} +console.log(result.output); // TOutput +console.log(result.messages); // Message[] +``` + +## See Also + +- [Agents concept](/concepts/agents) — overview with usage examples +- [Streaming guide](/guides/streaming) +- [`tool()` reference](/reference/agents/tool) +- [`flowAgent()` reference](/reference/agents/flow-agent) diff --git a/docs/reference/calculate-cost.md b/docs/reference/calculate-cost.md new file mode 100644 index 0000000..e0ba788 --- /dev/null +++ b/docs/reference/calculate-cost.md @@ -0,0 +1,169 @@ +# calculateCost() + +Calculate the USD cost of a model invocation from token usage counts and per-token pricing. Pair with `model()` to get pricing from the catalog, or pass pricing directly. + +## Function Signature + +```typescript +function calculateCost(usage: TokenUsage, pricing: ModelPricing): UsageCost; +``` + +## Parameters + +### TokenUsage + +Aggregated token counts from a model invocation. All fields are resolved `number` values — `0` when the provider does not report a given field. See [`createProviderRegistry()` reference](/reference/models/provider-registry#tokenusage) for the full type definition. 
+ +### ModelPricing + +```typescript +interface ModelPricing { + readonly input: number; + readonly output: number; + readonly cacheRead?: number; + readonly cacheWrite?: number; + readonly reasoning?: number; +} +``` + +| Field | Type | Required | Description | +| ------------ | -------- | -------- | -------------------------------------------- | +| `input` | `number` | Yes | Cost per input token in USD | +| `output` | `number` | Yes | Cost per output token in USD | +| `cacheRead` | `number` | No | Cost per cached read token; defaults to `0` | +| `cacheWrite` | `number` | No | Cost per cached write token; defaults to `0` | +| `reasoning` | `number` | No | Cost per reasoning token; defaults to `0` | + +Rates are per-token in USD, pre-converted from per-million-token values at catalog generation time. + +## UsageCost Return Type + +```typescript +interface UsageCost { + readonly input: number; + readonly output: number; + readonly cacheRead: number; + readonly cacheWrite: number; + readonly reasoning: number; + readonly total: number; +} +``` + +| Field | Type | Description | +| ------------ | -------- | ----------------------------------- | +| `input` | `number` | Cost for input tokens in USD | +| `output` | `number` | Cost for output tokens in USD | +| `cacheRead` | `number` | Cost for cached read tokens in USD | +| `cacheWrite` | `number` | Cost for cached write tokens in USD | +| `reasoning` | `number` | Cost for reasoning tokens in USD | +| `total` | `number` | Sum of all fields in USD | + +All fields are non-negative. Fields that don't apply are `0`. + +## Formula + +``` +input = usage.inputTokens * pricing.input +output = usage.outputTokens * pricing.output +cacheRead = usage.cacheReadTokens * (pricing.cacheRead ?? 0) +cacheWrite= usage.cacheWriteTokens* (pricing.cacheWrite ?? 0) +reasoning = usage.reasoningTokens * (pricing.reasoning ?? 
0) +total = input + output + cacheRead + cacheWrite + reasoning +``` + +## Usage Helpers + +> These functions are exported from `@funkai/agents`, not `@funkai/models`. + +These functions operate on `TokenUsageRecord[]` — the raw tracking records collected from agent execution traces. + +### usage() + +```typescript +function usage(records: TokenUsageRecord[]): TokenUsage; +``` + +Sum all token usage records into a single flat `TokenUsage`. Treats `undefined` fields as `0`. Returns zero-valued usage for an empty array. + +### usageByAgent() + +```typescript +function usageByAgent(records: TokenUsageRecord[]): readonly AgentTokenUsage[]; +``` + +Group and aggregate usage by agent. Records without an `agentId` are grouped as `{ type: 'unattributed' }`. + +```typescript +interface AgentTokenUsage extends TokenUsage { + readonly source: AgentSource | UnattributedSource; +} + +interface AgentSource { + readonly type: "agent"; + readonly agentId: string; +} + +interface UnattributedSource { + readonly type: "unattributed"; +} +``` + +### usageByModel() + +```typescript +function usageByModel(records: TokenUsageRecord[]): readonly ModelTokenUsage[]; +``` + +Group and aggregate usage by model ID. + +```typescript +interface ModelTokenUsage extends TokenUsage { + readonly modelId: string; +} +``` + +### collectUsages() + +```typescript +function collectUsages(trace: readonly TraceEntry[]): TokenUsage[]; +``` + +Recursively walk a `FlowAgentGenerateResult.trace` tree and extract all `TokenUsage` values. Entries without usage are skipped. Returns a flat array suitable for passing to `usage()`, `usageByAgent()`, or `usageByModel()`. + +## Combined Example + +```typescript +import { calculateCost, model } from '@funkai/models' +import { collectUsages, usage, usageByAgent, usageByModel } from '@funkai/agents' + +// From a flow agent result +const result = await myFlow.generate({ input: { ... 
} }) +if (!result.ok) return + +// Collect raw usage records from the trace +const records = collectUsages(result.trace) + +// Aggregate total usage +const totals = usage(records) + +// Per-agent breakdown +const perAgent = usageByAgent(records) + +// Per-model breakdown +const perModel = usageByModel(records) + +// Calculate cost using catalog pricing +const m = model('gpt-4.1') +if (m) { + const cost = calculateCost(totals, m.pricing) + console.log(`Total cost: $${cost.total.toFixed(6)}`) + console.log(`Input: $${cost.input.toFixed(6)}`) + console.log(`Output: $${cost.output.toFixed(6)}`) +} +``` + +## See Also + +- [Cost Tracking guide](/guides/cost-tracking) — patterns for budget enforcement and per-step cost logging +- [`model()` reference](/reference/models/model) — look up model pricing from the catalog +- [`createProviderRegistry()` reference](/reference/models/provider-registry) — resolve model IDs to providers diff --git a/docs/reference/create-prompt-group.md b/docs/reference/create-prompt-group.md new file mode 100644 index 0000000..dd39d6a --- /dev/null +++ b/docs/reference/create-prompt-group.md @@ -0,0 +1,66 @@ +# createPromptGroup() + +Create a group of related prompt modules under a shared namespace. Groups are used by codegen to organize prompts into nested registry paths. Most users interact with groups through the generated registry rather than calling this function directly. + +## Function Signature + +```typescript +function createPromptGroup>(name: string, prompts: T): T; +``` + +| Parameter | Type | Description | +| --------- | ---------------------------------------- | --------------------------------------------------- | +| `name` | `string` | Group name applied to each prompt (e.g. `'agents'`) | +| `prompts` | `T extends Record` | Record of prompt modules to group | + +**Returns:** A new record with the same keys, each module tagged with the group name. 
+ +## How Groups Work + +Groups correspond to the `group` field in `.prompt` file frontmatter. Each `/`-separated segment becomes a nesting level in the registry, with names converted to camelCase. + +```yaml +# In a .prompt file +--- +name: system-prompt +group: agents/coverage-assessor +--- +``` + +The codegen output registers this prompt at the path `prompts.agents.coverageAssessor.systemPrompt`. + +## Usage + +```typescript +import { createPrompt, createPromptGroup, createPromptRegistry } from "@funkai/prompts"; +import { z } from "zod"; + +// Create individual prompts +const systemPrompt = createPrompt({ + name: "system-prompt", + template: "You are a {{ language }} code reviewer.", + schema: z.object({ language: z.string() }), +}); + +const feedbackPrompt = createPrompt({ + name: "feedback", + template: "Provide feedback on:\n\n{{ code }}", + schema: z.object({ code: z.string() }), +}); + +// Group them under a namespace +const reviewer = createPromptGroup("agents/reviewer", { + systemPrompt, + feedback: feedbackPrompt, +}); + +// Assemble into a registry +const prompts = createPromptRegistry({ + agents: { reviewer }, +}); + +// Access via nested path +prompts.agents.reviewer.systemPrompt.render({ language: "TypeScript" }); +``` + +This function is primarily called by generated code. See [`createPromptRegistry()`](/reference/prompts/create-prompt-registry) for the consumer-facing API. diff --git a/docs/reference/create-prompt-registry.md b/docs/reference/create-prompt-registry.md new file mode 100644 index 0000000..3609469 --- /dev/null +++ b/docs/reference/create-prompt-registry.md @@ -0,0 +1,56 @@ +# createPromptRegistry() + +Create a typed, deep-frozen prompt registry from a map of prompt modules. The registry provides direct property access to render prompts with validated variables. Typically called by generated code, but can be used directly for runtime-only setups. 
+ +## Function Signature + +```typescript +function createPromptRegistry(modules: T): PromptRegistry; +``` + +| Parameter | Type | Description | +| --------- | --------------------------- | ------------------------------------------------------------------ | +| `modules` | `T extends PromptNamespace` | Record of camelCase prompt names (or nested namespaces) to modules | + +**Returns:** `PromptRegistry` — deep-readonly, direct property access. + +```typescript +const prompts = createPromptRegistry({ + agents: { coverageAssessor }, + greeting, +}); + +prompts.agents.coverageAssessor.render({ scope: "full" }); +prompts.greeting.render({ name: "Alice" }); +``` + +## PromptNamespace + +```typescript +interface PromptNamespace { + readonly [key: string]: PromptModule | PromptNamespace; +} +``` + +Recursive tree structure — values are either `PromptModule` leaves or nested `PromptNamespace` nodes. + +## PromptRegistry + +```typescript +type PromptRegistry = { + readonly [K in keyof T]: T[K] extends PromptModule + ? T[K] + : T[K] extends PromptNamespace + ? PromptRegistry + : T[K]; +}; +``` + +Deep-readonly version of a prompt tree. Prevents reassignment at any nesting level. Nesting is driven by the `group` field in `.prompt` frontmatter. + +## See Also + +- [Prompts concept](/concepts/prompts) — overview of the `.prompt` file format and codegen workflow +- [`createPrompt()` reference](/reference/prompts/create-prompt) — create individual prompt modules +- [`createPromptGroup()` reference](/reference/prompts/create-prompt-group) — group related prompts +- [Prompts CLI reference](/reference/prompts/cli) — codegen, lint, create, setup commands diff --git a/docs/reference/create-prompt.md b/docs/reference/create-prompt.md new file mode 100644 index 0000000..2a51bbb --- /dev/null +++ b/docs/reference/create-prompt.md @@ -0,0 +1,77 @@ +# createPrompt() + +Create a prompt module from a config object. 
Encapsulates LiquidJS template rendering and Zod variable validation into a single callable unit. This is the low-level API — most users create prompts via `.prompt` files and codegen instead. + +## Function Signature + +```typescript +function createPrompt(config: PromptConfig): PromptModule; +``` + +## Usage + +```typescript +import { createPrompt } from "@funkai/prompts"; +import { z } from "zod"; + +const greeting = createPrompt({ + name: "greeting", + template: "Hello, {{ name }}! You are a {{ role }}.", + schema: z.object({ + name: z.string(), + role: z.string(), + }), +}); + +// Render with validated variables +const text = greeting.render({ name: "Alice", role: "developer" }); +// => "Hello, Alice! You are a developer." + +// Validate without rendering +const parsed = greeting.validate({ name: "Bob", role: "designer" }); +``` + +## PromptConfig + +```typescript +interface PromptConfig { + readonly name: string; + readonly template: string; + readonly schema: ZodType; + readonly group?: string; +} +``` + +| Field | Type | Required | Description | +| ---------- | ------------ | -------- | ------------------------------------------------------------ | +| `name` | `string` | Yes | Kebab-case identifier (e.g. `'greeting'`, `'worker-system'`) | +| `template` | `string` | Yes | LiquidJS template string with `{{ variable }}` expressions | +| `schema` | `ZodType` | Yes | Zod schema for validating template variables | +| `group` | `string` | No | Namespace path (e.g. 
`'agents'`, `'agents/core'`) | + +## PromptModule + +```typescript +interface PromptModule { + readonly name: string; + readonly group: string | undefined; + readonly schema: ZodType; + render(variables: T): string; + validate(variables: unknown): T; +} +``` + +| Member | Description | +| --------------------- | -------------------------------------------------------------------------------- | +| `name` | Prompt identifier | +| `group` | Group/namespace path or `undefined` | +| `schema` | Zod schema for variable validation | +| `render(variables)` | Validate variables and render the LiquidJS template; returns the rendered string | +| `validate(variables)` | Validate and parse variables through the Zod schema; throws on failure | + +## See Also + +- [Prompts concept](/concepts/prompts) — overview of the `.prompt` file format and codegen workflow +- [`createPromptGroup()` reference](/reference/prompts/create-prompt-group) — group related prompts +- [`createPromptRegistry()` reference](/reference/prompts/create-prompt-registry) — assemble a typed registry +- [Prompts CLI reference](/reference/prompts/cli) — codegen, lint, create, setup commands diff --git a/docs/reference/flow-agent.md b/docs/reference/flow-agent.md new file mode 100644 index 0000000..33d2b8b --- /dev/null +++ b/docs/reference/flow-agent.md @@ -0,0 +1,375 @@ +# flowAgent() + +Create a multi-step agent whose orchestration logic is plain imperative TypeScript. The handler function receives a `$` StepBuilder for tracked operations that appear in the execution trace. Flow agents require a typed `input` Zod schema; the `output` schema is optional (omitting it yields a `string` output from collected sub-agent text). 
+ +## Function Signature + +```typescript +// With structured output +function flowAgent( + config: FlowAgentConfigWithOutput, + handler: FlowAgentHandler, +): FlowAgent; + +// Without output schema (void handler, string output) +function flowAgent( + config: FlowAgentConfigWithoutOutput, + handler: FlowAgentHandler, +): FlowAgent; +``` + +## FlowAgentConfig + +`FlowAgentConfig` is a union of the two variants below. + +### Shared fields (both variants) + +| Field | Type | Required | Default | Description | +| -------------- | ------------------------------------------------------------------- | -------- | ------- | ------------------------------------------ | +| `name` | `string` | Yes | — | Unique flow agent name | +| `input` | `ZodType` | Yes | — | Zod schema for validating input | +| `agents` | `FlowSubAgents` | No | — | Named agent dependencies passed to handler | +| `logger` | `Resolver` | No | default | Pino-compatible logger | +| `onStart` | `(event: { input: TInput }) => void \| Promise` | No | — | Fires when flow starts | +| `onError` | `(event: { input: TInput; error: Error }) => void \| Promise` | No | — | Fires on error | +| `onStepStart` | `(event: { step: StepInfo }) => void \| Promise` | No | — | Fires when a `$` step starts | +| `onStepFinish` | `(event: StepFinishEvent) => void \| Promise` | No | — | Fires when a `$` step finishes | + +### With output (`FlowAgentConfigWithOutput`) + +| Field | Type | Required | Description | +| ---------- | ----------------------------------------------------------------------------------------------------------------- | -------- | ------------------------------------------ | +| `output` | `ZodType` | Yes | Zod schema validating handler return value | +| `onFinish` | `(event: { input: TInput; result: FlowAgentGenerateResult; duration: number }) => void \| Promise` | No | Fires on success | + +### Without output (`FlowAgentConfigWithoutOutput`) + +| Field | Type | Required | Description | +| ---------- | 
---------------------------------------------------------------------------------------------------------------- | -------- | ------------------------------------ | +| `output` | `undefined` | — | Omitted or `undefined` | +| `onFinish` | `(event: { input: TInput; result: FlowAgentGenerateResult; duration: number }) => void \| Promise` | No | Fires on success; output is `string` | + +## FlowAgentHandler + +```typescript +type FlowAgentHandler = (params: FlowAgentParams) => Promise; +``` + +### FlowAgentParams + +| Field | Type | Description | +| -------- | --------------- | ------------------------------------------------- | +| `input` | `TInput` | Validated input | +| `$` | `StepBuilder` | Composable step utilities (see StepBuilder below) | +| `log` | `Logger` | Scoped logger for this execution | +| `agents` | `FlowSubAgents` | Named agent dependencies from config | + +## FlowAgentGenerateResult + +Extends `GenerateResult` with flow-specific fields. + +```typescript +interface FlowAgentGenerateResult extends GenerateResult { + trace: readonly TraceEntry[]; + duration: number; +} +``` + +| Field | Type | Description | +| -------------- | ----------------------- | ------------------------------------- | +| `output` | `TOutput` | Validated handler return value | +| `messages` | `Message[]` | Full message history | +| `usage` | `TokenUsage` | Aggregated token counts | +| `finishReason` | `string` | Finish reason string | +| `trace` | `readonly TraceEntry[]` | Frozen execution trace tree | +| `duration` | `number` | Total wall-clock time in milliseconds | + +## FlowAgent Interface + +```typescript +interface FlowAgent { + generate(params: GenerateParams): Promise>> + stream(params: GenerateParams): Promise>> + fn(): (params: GenerateParams) => Promise>> +} +``` + +## StepBuilder ($) + +The `$` object provides tracked operations. Every call appears in the execution trace. `$` is passed into nested callbacks so operations can be composed. 
+ +| Method | Signature | Returns | Description | +| ---------- | -------------------------------------------------------------------------- | ---------------------------- | ------------------------------------------ | +| `$.step` | `(config: StepConfig) => Promise>` | `StepResult` | Single unit of work | +| `$.agent` | `(config: AgentStepConfig) => Promise>` | `StepResult` | Agent call as tracked step | +| `$.map` | `(config: MapConfig) => Promise>` | `StepResult` | Parallel map with optional concurrency | +| `$.each` | `(config: EachConfig) => Promise>` | `StepResult` | Sequential side effects | +| `$.reduce` | `(config: ReduceConfig) => Promise>` | `StepResult` | Sequential accumulation | +| `$.while` | `(config: WhileConfig) => Promise>` | `StepResult` | Conditional loop | +| `$.all` | `(config: AllConfig) => Promise>` | `StepResult` | Concurrent heterogeneous ops (Promise.all) | +| `$.race` | `(config: RaceConfig) => Promise>` | `StepResult` | First-to-finish wins (Promise.race) | + +### StepConfig + +```typescript +interface StepConfig { + id: string; + execute: (params: { $: StepBuilder }) => Promise; + onStart?: (event: { id: string }) => void | Promise; + onFinish?: (event: { id: string; result: T; duration: number }) => void | Promise; + onError?: (event: { id: string; error: Error }) => void | Promise; +} +``` + +### AgentStepConfig + +```typescript +interface AgentStepConfig { + id: string; + agent: Agent; + input: TInput; + config?: Omit; + stream?: boolean; // pipe agent text through parent flow stream; default false + onStart?: (event: { id: string }) => void | Promise; + onFinish?: (event: { + id: string; + result: GenerateResult; + duration: number; + }) => void | Promise; + onError?: (event: { id: string; error: Error }) => void | Promise; +} +``` + +### MapConfig + +```typescript +interface MapConfig { + id: string; + input: readonly T[]; + concurrency?: number; // default: Infinity + execute: (params: { item: T; index: number; $: StepBuilder }) 
=> Promise; + onStart?: (event: { id: string }) => void | Promise; + onFinish?: (event: { id: string; result: R[]; duration: number }) => void | Promise; + onError?: (event: { id: string; error: Error }) => void | Promise; +} +``` + +### EachConfig + +```typescript +interface EachConfig { + id: string; + input: readonly T[]; + execute: (params: { item: T; index: number; $: StepBuilder }) => Promise; + onStart?: (event: { id: string }) => void | Promise; + onFinish?: (event: { id: string; duration: number }) => void | Promise; + onError?: (event: { id: string; error: Error }) => void | Promise; +} +``` + +### ReduceConfig + +```typescript +interface ReduceConfig { + id: string; + input: readonly T[]; + initial: R; + execute: (params: { item: T; accumulator: R; index: number; $: StepBuilder }) => Promise; + onStart?: (event: { id: string }) => void | Promise; + onFinish?: (event: { id: string; result: R; duration: number }) => void | Promise; + onError?: (event: { id: string; error: Error }) => void | Promise; +} +``` + +### WhileConfig + +```typescript +interface WhileConfig { + id: string; + condition: (params: { value: T | undefined; index: number }) => boolean; + execute: (params: { index: number; $: StepBuilder }) => Promise; + onStart?: (event: { id: string }) => void | Promise; + onFinish?: (event: { + id: string; + result: T | undefined; + duration: number; + }) => void | Promise; + onError?: (event: { id: string; error: Error }) => void | Promise; +} +``` + +### AllConfig / RaceConfig + +```typescript +type EntryFactory = (signal: AbortSignal, $: StepBuilder) => Promise; + +interface AllConfig { + id: string; + entries: EntryFactory[]; + onStart?: (event: { id: string }) => void | Promise; + onFinish?: (event: { id: string; result: unknown[]; duration: number }) => void | Promise; + onError?: (event: { id: string; error: Error }) => void | Promise; +} + +interface RaceConfig { + id: string; + entries: EntryFactory[]; + onStart?: (event: { id: string }) => 
void | Promise<void>; + onFinish?: (event: { id: string; result: unknown; duration: number }) => void | Promise<void>; + onError?: (event: { id: string; error: Error }) => void | Promise<void>; +} +``` + +## StepResult + +```typescript +type StepResult<T> = + | { ok: true; value: T; step: StepInfo; duration: number } + | { ok: false; error: StepError; step: StepInfo; duration: number }; + +interface StepError extends ResultError { + stepId: string; // the id from the failed step config +} +``` + +## TraceEntry + +```typescript +interface TraceEntry { + id: string; // matches the id from the $ config + type: OperationType; + input?: unknown; + output?: unknown; + startedAt: number; // Unix ms + finishedAt?: number; // Unix ms; undefined while running + error?: Error; + usage?: TokenUsage; // populated for agent-type entries + children?: readonly TraceEntry[]; +} +``` + +### OperationType values + +```typescript +type OperationType = "step" | "agent" | "map" | "each" | "reduce" | "while" | "all" | "race"; +``` + +## StepInfo + +```typescript +interface StepInfo { + id: string; + index: number; // auto-incrementing, starts at 0 + type: OperationType; +} +``` + +## StepFinishEvent + +Emitted by `onStepFinish`. Agent tool-loop steps populate the first four fields; flow orchestration steps populate the last three.
+ +| Field         | Type                                                                 | Present on               | +| ------------- | -------------------------------------------------------------------- | ------------------------ | +| `stepId`      | `string`                                                             | Agent tool-loop steps    | +| `toolCalls`   | `readonly { toolName: string; argsTextLength: number }[]`            | Agent tool-loop steps    | +| `toolResults` | `readonly { toolName: string; resultTextLength: number }[]`          | Agent tool-loop steps    | +| `usage`       | `{ inputTokens: number; outputTokens: number; totalTokens: number }` | Agent tool-loop steps    | +| `step`        | `StepInfo`                                                           | Flow orchestration steps | +| `result`      | `unknown`                                                            | Flow orchestration steps | +| `duration`    | `number`                                                             | Flow orchestration steps | + +## FlowAgentOverrides + +```typescript +type FlowAgentOverrides<TInput, TOutput> = + | Partial<FlowAgentConfig<TInput, TOutput>> + | ((config: FlowAgentConfig<TInput, TOutput>) => Partial<FlowAgentConfig<TInput, TOutput>>); +``` + +Scalars replace; the `agents` record is shallow-merged. + +## createFlowEngine() + +Creates a `flowAgent()`-like factory with additional step types merged into `$` and optional default lifecycle hooks.
+ +```typescript +function createFlowEngine( + config: FlowEngineConfig, +): FlowFactory; +``` + +### FlowEngineConfig + +| Field | Type | Description | +| -------------- | ----------------------------------------------------------------------------------------- | --------------------------------------------- | +| `$` | `TCustomSteps` | Map of custom step names to factory functions | +| `onStart` | `(event: { input: unknown }) => void \| Promise` | Default start hook for all flow agents | +| `onFinish` | `(event: { input: unknown; result: unknown; duration: number }) => void \| Promise` | Default finish hook | +| `onError` | `(event: { input: unknown; error: Error }) => void \| Promise` | Default error hook | +| `onStepStart` | `(event: { step: StepInfo }) => void \| Promise` | Default step-start hook | +| `onStepFinish` | `(event: StepFinishEvent) => void \| Promise` | Default step-finish hook | + +### CustomStepFactory + +```typescript +type CustomStepFactory = (params: { + ctx: ExecutionContext; + config: TConfig; +}) => Promise; +``` + +Custom step names must not conflict with built-in names: `step`, `agent`, `map`, `each`, `reduce`, `while`, `all`, `race`. + +### FlowFactory + +The return type of `createFlowEngine()`. Call it exactly like `flowAgent()`: + +```typescript +const engine = createFlowEngine({ $: { retry: retryFactory } }); + +const myFlow = engine( + { name: "my-flow", input: MyInput, output: MyOutput }, + async ({ input, $ }) => { + const data = await $.retry({ attempts: 3, execute: async () => fetch("...") }); + return data; + }, +); +``` + +## evolve() + +Creates a new agent or flow agent from an existing one with config overrides. The original is not modified. 
+ +```typescript +// Agent overload +function evolve( + base: Agent, + overrides: AgentOverrides, +): Agent; + +// FlowAgent overload +function evolve( + base: FlowAgent, + overrides: FlowAgentOverrides, + handler?: FlowAgentHandler, +): FlowAgent; +``` + +**Merge logic:** Scalars replace. Record fields (`tools`, `agents`) are shallow-merged: `{ ...base, ...override }`. + +`overrides` can be a partial config object or a mapper function: + +```typescript +// Partial config +evolve(base, { name: "reviewer-local", model: openai("gpt-4.1-mini") }); + +// Mapper function — receives current config +evolve(base, (config) => ({ name: `${config.name}-local` })); +``` + +## See Also + +- [Flow Agents concept](/concepts/flow-agents) — overview with usage examples +- [Multi-Agent Orchestration guide](/guides/multi-agent) +- [`agent()` reference](/reference/agents/agent) +- [`tool()` reference](/reference/agents/tool) diff --git a/docs/reference/model.md b/docs/reference/model.md new file mode 100644 index 0000000..43c5e17 --- /dev/null +++ b/docs/reference/model.md @@ -0,0 +1,123 @@ +# model() + +Look up a single model definition from the catalog by its identifier. Returns the full `ModelDefinition` including pricing, capabilities, and modalities, or `null` when the ID is not found. + +## Function Signature + +```typescript +function model(id: ModelId): ModelDefinition | null; +``` + +| Parameter | Type | Description | +| --------- | --------- | ------------------------------------------------------------------------ | +| `id` | `ModelId` | Provider-native model identifier (e.g. `"gpt-4.1"`, `"claude-sonnet-4"`) | + +**Returns:** `ModelDefinition | null` + +## ModelId + +```typescript +type KnownModelId = "gpt-4.1" | "claude-sonnet-4" | /* ... */; +type ModelId = LiteralUnion; +``` + +`ModelId` provides IDE autocomplete for cataloged models while accepting arbitrary strings for custom or newly released models. 
+ +## ModelDefinition + +```typescript +interface ModelDefinition { + readonly id: string; + readonly name: string; + readonly provider: string; + readonly family: string; + readonly pricing: ModelPricing; + readonly contextWindow: number; + readonly maxOutput: number; + readonly modalities: ModelModalities; + readonly capabilities: ModelCapabilities; +} +``` + +| Field | Type | Description | +| --------------- | ------------------- | ---------------------------------------------- | +| `id` | `string` | Provider-native identifier (e.g. `"gpt-4.1"`) | +| `name` | `string` | Human-readable display name | +| `provider` | `string` | Provider slug (e.g. `"openai"`, `"anthropic"`) | +| `family` | `string` | Model family (e.g. `"gpt"`, `"claude-sonnet"`) | +| `pricing` | `ModelPricing` | Per-token pricing rates | +| `contextWindow` | `number` | Maximum context window in tokens | +| `maxOutput` | `number` | Maximum output tokens | +| `modalities` | `ModelModalities` | Accepted input/output modalities | +| `capabilities` | `ModelCapabilities` | Boolean capability flags | + +## ModelPricing + +```typescript +interface ModelPricing { + readonly input: number; + readonly output: number; + readonly cacheRead?: number; + readonly cacheWrite?: number; + readonly reasoning?: number; +} +``` + +All rates are per-token in USD. Optional fields are absent when the provider does not support that billing dimension. + +## ModelCapabilities + +```typescript +interface ModelCapabilities { + readonly reasoning: boolean; + readonly toolCall: boolean; + readonly attachment: boolean; + readonly structuredOutput: boolean; +} +``` + +## ModelModalities + +```typescript +interface ModelModalities { + readonly input: readonly string[]; + readonly output: readonly string[]; +} +``` + +Values: `"text"`, `"image"`, `"audio"`, `"video"`, `"pdf"`. + +## Provider Subpath Exports + +Each provider has a dedicated subpath export with typed model IDs and per-provider lookup functions. 
+ +| Subpath | Exports | +| ------------------------------- | ----------------------------------------------------------------------- | +| `@funkai/models/openai` | `OpenAIModelId`, `openAIModels`, `openAIModel(id)` | +| `@funkai/models/anthropic` | `AnthropicModelId`, `anthropicModels`, `anthropicModel(id)` | +| `@funkai/models/google` | `GoogleModelId`, `googleModels`, `googleModel(id)` | +| `@funkai/models/google-vertex` | `GoogleVertexModelId`, `googleVertexModels`, `googleVertexModel(id)` | +| `@funkai/models/mistral` | `MistralModelId`, `mistralModels`, `mistralModel(id)` | +| `@funkai/models/amazon-bedrock` | `AmazonBedrockModelId`, `amazonBedrockModels`, `amazonBedrockModel(id)` | +| `@funkai/models/groq` | `GroqModelId`, `groqModels`, `groqModel(id)` | +| `@funkai/models/deepseek` | `DeepSeekModelId`, `deepSeekModels`, `deepSeekModel(id)` | +| `@funkai/models/xai` | `XAIModelId`, `xAIModels`, `xAIModel(id)` | +| `@funkai/models/cohere` | `CohereModelId`, `cohereModels`, `cohereModel(id)` | +| `@funkai/models/fireworks-ai` | `FireworksAIModelId`, `fireworksAIModels`, `fireworksAIModel(id)` | +| `@funkai/models/togetherai` | `TogetherAIModelId`, `togetherAIModels`, `togetherAIModel(id)` | +| `@funkai/models/deepinfra` | `DeepInfraModelId`, `deepInfraModels`, `deepInfraModel(id)` | +| `@funkai/models/cerebras` | `CerebrasModelId`, `cerebrasModels`, `cerebrasModel(id)` | +| `@funkai/models/perplexity` | `PerplexityModelId`, `perplexityModels`, `perplexityModel(id)` | +| `@funkai/models/openrouter` | `OpenRouterModelId`, `openRouterModels`, `openRouterModel(id)` | +| `@funkai/models/llama` | `LlamaModelId`, `llamaModels`, `llamaModel(id)` | +| `@funkai/models/alibaba` | `AlibabaModelId`, `alibabaModels`, `alibabaModel(id)` | +| `@funkai/models/nvidia` | `NvidiaModelId`, `nvidiaModels`, `nvidiaModel(id)` | +| `@funkai/models/huggingface` | `HuggingFaceModelId`, `huggingFaceModels`, `huggingFaceModel(id)` | +| `@funkai/models/inception` | `InceptionModelId`, 
`inceptionModels`, `inceptionModel(id)` | + +## See Also + +- [Models concept](/concepts/models) — overview with usage examples +- [`models()` reference](/reference/models/models) — filter and query the full catalog +- [`createProviderRegistry()` reference](/reference/models/provider-registry) — resolve model IDs to providers +- [`calculateCost()` reference](/reference/models/calculate-cost) — compute cost from usage diff --git a/docs/reference/models.md b/docs/reference/models.md new file mode 100644 index 0000000..b329ba3 --- /dev/null +++ b/docs/reference/models.md @@ -0,0 +1,58 @@ +# models() + +Return the full model catalog or a filtered subset. Use predicate functions to filter by capability, provider, modality, context window, pricing, or any combination. + +## Function Signature + +```typescript +function models(filter?: (m: ModelDefinition) => boolean): readonly ModelDefinition[]; +``` + +| Parameter | Type | Description | +| --------- | --------------------------------- | --------------------------------------------- | +| `filter` | `(m: ModelDefinition) => boolean` | Optional predicate; omit to return all models | + +**Returns:** `readonly ModelDefinition[]` + +## MODELS + +```typescript +const MODELS: readonly ModelDefinition[]; +``` + +Full catalog array. All models from all providers, generated from models.dev. 
+ +## Filtering Patterns + +```typescript +import { models } from "@funkai/models"; + +// By capability +const reasoning = models((m) => m.capabilities.reasoning); +const withTools = models((m) => m.capabilities.toolCall); + +// By provider +const openai = models((m) => m.provider === "openai"); + +// By modality +const vision = models((m) => m.modalities.input.includes("image")); + +// By context window +const large = models((m) => m.contextWindow >= 128_000); + +// Combined +const ideal = models( + (m) => m.capabilities.reasoning && m.capabilities.toolCall && m.contextWindow >= 128_000, +); + +// Sort by price +const cheapest = models((m) => m.capabilities.reasoning).toSorted( + (a, b) => a.pricing.input - b.pricing.input, +); +``` + +## See Also + +- [Models concept](/concepts/models) — overview with usage examples +- [`model()` reference](/reference/models/model) — type definitions for `ModelDefinition`, `ModelPricing`, `ModelCapabilities`, `ModelModalities` +- [`calculateCost()` reference](/reference/models/calculate-cost) — compute cost from usage diff --git a/docs/reference/prompts-cli.md b/docs/reference/prompts-cli.md new file mode 100644 index 0000000..5b73e74 --- /dev/null +++ b/docs/reference/prompts-cli.md @@ -0,0 +1,70 @@ +# Prompts CLI + +Command-line interface for working with `.prompt` files. Handles code generation, validation, scaffolding, and project setup. All commands are available via the `funkai prompts` binary from `@funkai/cli`. + +## prompts generate + +Generate typed TypeScript modules from `.prompt` files. Also available as `prompts gen`. 
+ +```bash +prompts generate --out .prompts/client --includes "prompts/**" "src/agents/**" +``` + +| Flag | Alias | Required | Description | +| ------------ | ----- | -------- | ------------------------------------------- | +| `--out` | `-o` | Yes | Output directory for generated files | +| `--includes` | `-r` | Yes | Glob pattern(s) to scan for `.prompt` files | +| `--silent` | — | No | Suppress output except errors | + +Runs lint validation automatically before generating. Exits with code 1 on lint errors. Custom partials are auto-discovered from the sibling `partials/` directory relative to `--out`. + +## prompts lint + +Validate `.prompt` files without generating output. + +```bash +prompts lint --includes "prompts/**" "src/agents/**" +``` + +| Flag | Alias | Required | Description | +| ------------ | ----- | -------- | -------------------------------------------------------- | +| `--includes` | `-r` | Yes | Glob pattern(s) to scan for `.prompt` files | +| `--partials` | `-p` | No | Custom partials directory (default: `.prompts/partials`) | +| `--silent` | — | No | Suppress output except errors | + +| Diagnostic level | Meaning | +| ---------------- | ---------------------------------------- | +| Error | Template variable not declared in schema | +| Warn | Schema variable not used in template | + +## prompts create + +Scaffold a new `.prompt` file. + +```bash +prompts create coverage-assessor --out src/agents/coverage-assessor +prompts create summary --partial +``` + +| Argument / Flag | Required | Description | +| --------------- | -------- | ------------------------------------------------------------- | +| `` | Yes | Prompt name (kebab-case) | +| `--out` | No | Output directory (defaults to cwd) | +| `--partial` | No | Create as a partial in `.prompts/partials/` (ignores `--out`) | + +## prompts setup + +Interactive project configuration. No flags — fully interactive. + +Configures: + +1. VSCode file association (`*.prompt` -> Markdown) +2. 
VSCode Liquid extension recommendation +3. `.gitignore` entry for generated `.prompts/client/` directory +4. `tsconfig.json` path alias (`~prompts` -> `./.prompts/client/index.ts`) + +## See Also + +- [Prompts concept](/concepts/prompts) — overview of the `.prompt` file format and codegen workflow +- [`createPrompt()` reference](/reference/prompts/create-prompt) — runtime prompt module API +- [`createPromptRegistry()` reference](/reference/prompts/create-prompt-registry) — registry API diff --git a/docs/reference/prompts.md b/docs/reference/prompts.md new file mode 100644 index 0000000..0de4dff --- /dev/null +++ b/docs/reference/prompts.md @@ -0,0 +1,221 @@ +# Prompts + +## Library API + +### createPrompt() + +```typescript +function createPrompt<T>(config: PromptConfig<T>): PromptModule<T>; +``` + +Create a prompt module from a config object. Encapsulates LiquidJS template rendering and Zod variable validation. + +### createPromptGroup() + +```typescript +function createPromptGroup(config: unknown): unknown; +``` + +Create a group of related prompt modules. Used internally by codegen output to namespace modules under a group path. + +### createPromptRegistry() + +```typescript +function createPromptRegistry<T extends PromptNamespace>(modules: T): PromptRegistry<T>; +``` + +Create a typed, deep-frozen prompt registry from a (possibly nested) map of prompt modules. Typically called by generated `index.ts` output. + +| Parameter | Type | Description | +| --------- | --------------------------- | ------------------------------------------------------------------ | +| `modules` | `T extends PromptNamespace` | Record of camelCase prompt names (or nested namespaces) to modules | + +**Returns:** `PromptRegistry<T>` — deep-readonly, direct property access via `prompts.agents.coverageAssessor.render(vars)`.
+ +## Types + +### PromptConfig + +```typescript +interface PromptConfig { + readonly name: string; + readonly template: string; + readonly schema: ZodType; + readonly group?: string; +} +``` + +| Field | Type | Required | Description | +| ---------- | ------------ | -------- | ------------------------------------------------------------ | +| `name` | `string` | Yes | Kebab-case identifier (e.g. `'greeting'`, `'worker-system'`) | +| `template` | `string` | Yes | LiquidJS template string with `{{ variable }}` expressions | +| `schema` | `ZodType` | Yes | Zod schema for validating template variables | +| `group` | `string` | No | Namespace path (e.g. `'agents'`, `'agents/core'`) | + +### PromptModule + +```typescript +interface PromptModule { + readonly name: string; + readonly group: string | undefined; + readonly schema: ZodType; + render(variables: T): string; + validate(variables: unknown): T; +} +``` + +| Member | Description | +| --------------------- | -------------------------------------------------------------------------------- | +| `name` | Prompt identifier | +| `group` | Group/namespace path or `undefined` | +| `schema` | Zod schema for variable validation | +| `render(variables)` | Validate variables and render the LiquidJS template; returns the rendered string | +| `validate(variables)` | Validate and parse variables through the Zod schema; throws on failure | + +### PromptNamespace + +```typescript +interface PromptNamespace { + readonly [key: string]: PromptModule | PromptNamespace; +} +``` + +Recursive tree structure — values are either `PromptModule` leaves or nested `PromptNamespace` nodes. + +### PromptRegistry + +```typescript +type PromptRegistry = { + readonly [K in keyof T]: T[K] extends PromptModule + ? T[K] + : T[K] extends PromptNamespace + ? PromptRegistry + : T[K]; +}; +``` + +Deep-readonly version of a prompt tree. Prevents reassignment at any nesting level. 
+ +## .prompt File Format + +`.prompt` files are Markdown files with a YAML frontmatter block followed by a LiquidJS template body. + +``` +--- +name: greeting +group: agents +schema: + name: string + place: + type: string + required: true + description: The destination name +--- + +Hello {{ name }}, welcome to {{ place }}! +``` + +### Frontmatter Fields + +| Field | Required | Type | Constraint | Description | +| --------- | -------- | -------- | -------------- | ------------------------------------------------ | +| `name` | Yes | `string` | `^[a-z0-9-]+$` | Unique kebab-case identifier | +| `group` | No | `string` | — | Namespace path (e.g. `agents/coverage-assessor`) | +| `version` | No | `string` | — | Version identifier | +| `schema` | No | `object` | — | Variable declarations map | + +### Schema Variable Fields + +Each key under `schema` declares a template variable. + +**Shorthand** (type string only, defaults to required): + +```yaml +schema: + scope: string +``` + +**Full object**: + +```yaml +schema: + scope: + type: string + required: true + description: Assessment scope +``` + +| Field | Default | Description | +| ------------- | -------- | ---------------------------------------------------- | +| `type` | `string` | Variable type (`string` only) | +| `required` | `true` | Whether variable must be provided at render time | +| `description` | — | Human-readable description (used in generated JSDoc) | + +### Template Syntax + +The template body uses LiquidJS. Variables declared in `schema` are available as `{{ variableName }}` expressions. Partials from the sibling `partials/` directory can be included with `{% render 'partial-name' %}`. + +## CLI Commands + +All commands are available via the `prompts` binary. + +### prompts generate + +Generate typed TypeScript modules from `.prompt` files. Also available as `prompts gen`. 
+ +```bash +prompts generate --out .prompts/client --includes "prompts/**" "src/agents/**" +``` + +| Flag | Alias | Required | Description | +| ------------ | ----- | -------- | ------------------------------------------- | +| `--out` | `-o` | Yes | Output directory for generated files | +| `--includes` | `-r` | Yes | Glob pattern(s) to scan for `.prompt` files | +| `--silent` | — | No | Suppress output except errors | + +Runs lint validation automatically before generating. Exits with code 1 on lint errors. Custom partials are auto-discovered from the sibling `partials/` directory relative to `--out`. + +### prompts lint + +Validate `.prompt` files without generating output. + +```bash +prompts lint --includes "prompts/**" "src/agents/**" +``` + +| Flag | Alias | Required | Description | +| ------------ | ----- | -------- | -------------------------------------------------------- | +| `--includes` | `-r` | Yes | Glob pattern(s) to scan for `.prompt` files | +| `--partials` | `-p` | No | Custom partials directory (default: `.prompts/partials`) | +| `--silent` | — | No | Suppress output except errors | + +| Diagnostic level | Meaning | +| ---------------- | ---------------------------------------- | +| Error | Template variable not declared in schema | +| Warn | Schema variable not used in template | + +### prompts create + +Scaffold a new `.prompt` file. + +```bash +prompts create coverage-assessor --out src/agents/coverage-assessor +prompts create summary --partial +``` + +| Argument / Flag | Required | Description | +| --------------- | -------- | ------------------------------------------------------------- | +| `` | Yes | Prompt name (kebab-case) | +| `--out` | No | Output directory (defaults to cwd) | +| `--partial` | No | Create as a partial in `.prompts/partials/` (ignores `--out`) | + +### prompts setup + +Interactive project configuration. No flags — fully interactive. + +Configures: + +1. VSCode file association (`*.prompt` → Markdown) +2. 
VSCode Liquid extension recommendation +3. `.gitignore` entry for generated `.prompts/client/` directory +4. `tsconfig.json` path alias (`~prompts` → `./.prompts/client/index.ts`) diff --git a/docs/reference/provider-registry.md b/docs/reference/provider-registry.md new file mode 100644 index 0000000..784eeaa --- /dev/null +++ b/docs/reference/provider-registry.md @@ -0,0 +1,130 @@ +# createProviderRegistry() + +Create a provider registry that maps string prefixes to AI SDK provider instances. The returned function resolves `"provider/model"` strings into `LanguageModel` instances suitable for passing to `agent()`. + +## Function Signature + +```typescript +function createProviderRegistry(config: ProviderRegistryConfig): ProviderRegistry; +``` + +## ProviderRegistryConfig + +```typescript +interface ProviderRegistryConfig { + readonly providers: AIProviders; +} +``` + +| Field | Type | Required | Description | +| ----------- | ------------- | -------- | ----------------------------------------------------------- | +| `providers` | `AIProviders` | Yes | Map of provider prefix strings to AI SDK provider instances | + +`AIProviders` is the parameter type accepted by the AI SDK's `createProviderRegistry`. Each key is a prefix string used to route model IDs. + +```typescript +import { createOpenAI } from "@ai-sdk/openai"; +import { anthropic } from "@ai-sdk/anthropic"; +import { createProviderRegistry } from "@funkai/models"; + +const registry = createProviderRegistry({ + providers: { + openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), + anthropic, + }, +}); +``` + +## ProviderRegistry Type + +```typescript +type ProviderRegistry = (modelId: ModelId) => LanguageModel; +``` + +`ProviderRegistry` is a plain function. Call it with a `provider/model` string to receive a `LanguageModel` instance. + +```typescript +const gpt41 = registry("openai/gpt-4.1"); +const claude = registry("anthropic/claude-sonnet-4"); +``` + +## Resolution Algorithm + +1. 
The `modelId` string is validated — must be non-empty and contain `/`. +2. The prefix before the first `/` is extracted (e.g. `"openai"` from `"openai/gpt-4.1"`). +3. The prefix is looked up in the `providers` map. +4. The AI SDK's internal registry resolves the provider prefix + model suffix to a `LanguageModel` using `/` as the separator. +5. Throws `Error` if the model ID is empty, missing a `/`, or the provider is not registered. + +```typescript +// Throws: Invalid model ID "gpt-4.1": expected "provider/model" format +registry("gpt-4.1"); + +// Throws: Cannot resolve model: model ID is empty +registry(""); + +// Throws: Failed to resolve model "unknown/gpt-4.1": ... +registry("unknown/gpt-4.1"); +``` + +## OpenRouter Fallback Pattern + +A common pattern is to register a single OpenRouter provider that can resolve any `openrouter/...` model ID, providing a fallback when a specific provider is not configured. + +```typescript +import { createOpenRouter } from "@openrouter/ai-sdk-provider"; +import { createOpenAI } from "@ai-sdk/openai"; +import { createProviderRegistry } from "@funkai/models"; + +const registry = createProviderRegistry({ + providers: { + openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), + openrouter: createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY }), + }, +}); + +// Direct provider +const m1 = registry("openai/gpt-4.1"); + +// Via OpenRouter +const m2 = registry("openrouter/anthropic/claude-sonnet-4"); +``` + +## LanguageModel Type + +```typescript +// Narrowed to AI SDK v3 specification only +type LanguageModel = Extract; +``` + +`LanguageModel` is the concrete v3 model object from the AI SDK. It is the required type for `AgentConfig.model` in `@funkai/agents`. Provider functions like `openai('gpt-4.1')` return this type. Middleware-wrapped models via `wrapLanguageModel()` also satisfy this type. 
+ +`LanguageModel` is exported from both `@funkai/models` and `@funkai/agents`: + +```typescript +import type { LanguageModel } from "@funkai/models"; +import type { LanguageModel } from "@funkai/agents"; +``` + +Both refer to the same underlying AI SDK v3 type. + +## TokenUsage + +```typescript +interface TokenUsage { + readonly inputTokens: number; + readonly outputTokens: number; + readonly totalTokens: number; + readonly cacheReadTokens: number; + readonly cacheWriteTokens: number; + readonly reasoningTokens: number; +} +``` + +All fields are resolved `number` values — `0` when the provider does not report a given field. Exported from both `@funkai/models` and `@funkai/agents`. + +## See Also + +- [Models concept](/concepts/models) — overview with usage examples +- [`model()` reference](/reference/models/model) — look up model definitions +- [`calculateCost()` reference](/reference/models/calculate-cost) — compute cost from usage and pricing diff --git a/docs/reference/tool.md b/docs/reference/tool.md new file mode 100644 index 0000000..18efd50 --- /dev/null +++ b/docs/reference/tool.md @@ -0,0 +1,116 @@ +# tool() + +Create a typed function-calling tool for use with agents. The model sees the `description` and `inputSchema`; when the model invokes the tool, input is validated against the Zod schema before `execute` runs. + +## Function Signature + +```typescript +function tool(config: ToolConfig): Tool; +``` + +## ToolConfig + +```typescript +interface ToolConfig { + description: string; + title?: string; + inputSchema: ZodType; + outputSchema?: ZodType; + inputExamples?: { input: TInput }[]; + execute: (input: TInput) => Promise; +} +``` + +| Field | Type | Required | Description | +| --------------- | ------------------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------- | +| `description` | `string` | Yes | Human-readable description shown to the model. 
Guides when and how to call the tool | +| `title` | `string` | No | Optional display title shown in UIs and logs | +| `inputSchema` | `ZodType` | Yes | Zod schema serialized to JSON Schema for the model; input is validated before `execute` | +| `outputSchema` | `ZodType` | No | Validates the return value of `execute` before it is sent back to the model | +| `inputExamples` | `{ input: TInput }[]` | No | Example inputs to guide the model. Natively supported by Anthropic; use `addToolInputExamplesMiddleware` for other providers | +| `execute` | `(input: TInput) => Promise` | Yes | Called with validated input after the model requests a tool call | + +## Tool Type + +```typescript +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type Tool = ReturnType>; +``` + +`Tool` is the return type of the AI SDK's `tool()` helper. Defaults use `any` so `Record` accepts concrete typed tools without contravariance issues. + +## Tool Names + +Tool keys in the `tools` record on `AgentConfig` must be provider-safe identifiers matching `^[a-zA-Z_][a-zA-Z0-9_]*$`. Only camelCase and snake_case are accepted. Kebab-case, dot.case, and names with colons or spaces are rejected at both type level (`ToolName`) and runtime. + +```typescript +type ToolName = /* camelCase or snake_case only */ +``` + +```typescript +// Valid +{ fetchPage, search_web, getWeather } + +// Invalid — runtime error +{ 'fetch-page': ..., 'search.web': ... } +``` + +## Using Tools with Agents + +Pass a record of `Tool` instances to `AgentConfig.tools`. Keys become the tool names exposed to the model. 
+ +```typescript +import { agent, tool } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const fetchPage = tool({ + description: "Fetch the contents of a web page by URL", + inputSchema: z.object({ url: z.string().url() }), + execute: async ({ url }) => { + const res = await fetch(url); + return { status: res.status, body: await res.text() }; + }, +}); + +const myAgent = agent({ + name: "researcher", + model: openai("gpt-4.1"), + system: "You research topics on the web.", + tools: { fetchPage }, +}); +``` + +## Using Agents as Subagents + +Pass a record of `Agent` instances to `AgentConfig.agents`. Each subagent is automatically wrapped as a callable tool. Keys must satisfy the same naming constraints as tool names. + +```typescript +const orchestrator = agent({ + name: "orchestrator", + model: openai("gpt-4.1"), + system: "Coordinate research and summarization.", + agents: { researcher, summarizer }, +}); +``` + +Abort signals propagate from parent to child automatically. The parent agent's tool loop can invoke subagents by name just like regular tools. + +## Dynamic Tools via Resolver + +Both `tools` and `agents` accept resolver functions that receive the validated input: + +```typescript +const myAgent = agent({ + name: "dynamic", + model: openai("gpt-4.1"), + input: z.object({ plan: z.enum(["basic", "pro"]) }), + prompt: ({ input }) => `Process with ${input.plan} plan`, + tools: ({ input }) => (input.plan === "pro" ? 
{ fetchPage, search } : { search }), +}); +``` + +## See Also + +- [Tools concept](/concepts/tools) — overview with progressive examples +- [`agent()` reference](/reference/agents/agent) — agent configuration and tool integration diff --git a/examples/basic-agent/package.json b/examples/basic-agent/package.json index 6fbd93b..7de73d0 100644 --- a/examples/basic-agent/package.json +++ b/examples/basic-agent/package.json @@ -9,7 +9,7 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "^3.0.0", + "@ai-sdk/openai": "^3.0.48", "@funkai/agents": "workspace:*", "zod": "catalog:" }, diff --git a/examples/flow-agent/package.json b/examples/flow-agent/package.json index 12ab512..bf6f9cd 100644 --- a/examples/flow-agent/package.json +++ b/examples/flow-agent/package.json @@ -9,7 +9,7 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "^3.0.0", + "@ai-sdk/openai": "^3.0.48", "@funkai/agents": "workspace:*", "zod": "catalog:" }, diff --git a/examples/prompts-basic/package.json b/examples/prompts-basic/package.json index e4e8283..ba6de89 100644 --- a/examples/prompts-basic/package.json +++ b/examples/prompts-basic/package.json @@ -10,7 +10,7 @@ "prompts:generate": "funkai prompts generate --out .prompts/client --includes \"src/agents/**\"" }, "dependencies": { - "@ai-sdk/openai": "^3.0.0", + "@ai-sdk/openai": "^3.0.48", "@funkai/agents": "workspace:*", "@funkai/prompts": "workspace:*", "zod": "catalog:" diff --git a/examples/prompts-subagents/package.json b/examples/prompts-subagents/package.json index be4dec4..1fca54e 100644 --- a/examples/prompts-subagents/package.json +++ b/examples/prompts-subagents/package.json @@ -10,7 +10,7 @@ "prompts:generate": "funkai prompts generate --out .prompts/client --includes \"src/agents/**\"" }, "dependencies": { - "@ai-sdk/openai": "^3.0.0", + "@ai-sdk/openai": "^3.0.48", "@funkai/agents": "workspace:*", "@funkai/prompts": "workspace:*", "zod": "catalog:" diff --git 
a/examples/realworld-cli/package.json b/examples/realworld-cli/package.json index a06c85e..b695da1 100644 --- a/examples/realworld-cli/package.json +++ b/examples/realworld-cli/package.json @@ -13,13 +13,13 @@ "prompts:generate": "funkai prompts generate --out .prompts/client --includes \"api/agents/**\"" }, "dependencies": { - "@ai-sdk/openai": "^3.0.0", + "@ai-sdk/openai": "^3.0.48", "@clack/prompts": "^1.1.0", "@funkai/agents": "workspace:*", "@funkai/prompts": "workspace:*", "@hono/node-server": "^1.19.11", "dotenv": "^17.3.1", - "hono": "^4.12.8", + "hono": "^4.12.9", "zod": "catalog:" }, "devDependencies": { diff --git a/examples/streaming/package.json b/examples/streaming/package.json index 26dedce..b400497 100644 --- a/examples/streaming/package.json +++ b/examples/streaming/package.json @@ -9,7 +9,7 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@ai-sdk/openai": "^3.0.0", + "@ai-sdk/openai": "^3.0.48", "@funkai/agents": "workspace:*", "zod": "catalog:" }, diff --git a/package.json b/package.json index a858c10..b819c19 100644 --- a/package.json +++ b/package.json @@ -14,14 +14,14 @@ "changeset": "changeset", "version": "changeset version", "release": "pnpm build && changeset publish", - "docs": "zpress dev", + "docs:dev": "zpress dev", "docs:build": "zpress build", "docs:serve": "zpress serve" }, "devDependencies": { "@changesets/cli": "^2.30.0", "@vitest/coverage-v8": "catalog:", - "@zpress/kit": "^0.2.4", + "@zpress/kit": "^0.2.12", "eslint-plugin-functional": "^9.0.4", "eslint-plugin-jsdoc": "^62.8.0", "eslint-plugin-security": "^4.0.0", diff --git a/packages/agents/README.md b/packages/agents/README.md index 9e814a6..770edb2 100644 --- a/packages/agents/README.md +++ b/packages/agents/README.md @@ -27,14 +27,15 @@ npm install @funkai/agents ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; const helper = agent({ name: "helper", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are a helpful 
assistant.", }); -const result = await helper.generate("What is TypeScript?"); +const result = await helper.generate({ prompt: "What is TypeScript?" }); if (!result.ok) { console.error(result.error.code, result.error.message); @@ -59,13 +60,13 @@ const fetchPage = tool({ }); ``` -### Workflow +### Flow Agent ```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; -const research = workflow( +const research = flowAgent( { name: "research", input: z.object({ topic: z.string() }), @@ -87,7 +88,7 @@ const research = workflow( }); return { - summary: analysis.ok ? analysis.output : "Failed to summarize", + summary: analysis.ok ? analysis.value.output : "Failed to summarize", sources, }; }, @@ -99,30 +100,30 @@ const result = await research.generate({ topic: "Effect systems" }); ### Streaming ```ts -const result = await helper.stream("Explain closures"); +const result = await helper.stream({ prompt: "Explain closures" }); if (result.ok) { - for await (const chunk of result.stream) { - process.stdout.write(chunk); + for await (const part of result.fullStream) { + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } } } ``` ## API -| Export | Description | -| ------------------------------ | --------------------------------------------------------------------- | -| `agent(config)` | Create an agent. Returns `{ generate, stream, fn }`. | -| `tool(config)` | Create a tool for function calling. | -| `workflow(config, handler)` | Create a workflow with typed I/O and tracked steps. | -| `createWorkflowEngine(config)` | Create a workflow factory with shared configuration and custom steps. | -| `openrouter(modelId)` | Shorthand to create an OpenRouter language model from env key. | -| `createOpenRouter(options?)` | Create a reusable OpenRouter provider instance. 
| +| Export | Description | +| ---------------------------- | ----------------------------------------------------------------------- | +| `agent(config)` | Create an agent. Returns `{ generate, stream, fn }`. | +| `tool(config)` | Create a tool for function calling. | +| `flowAgent(config, handler)` | Create a flow agent with typed I/O and tracked steps. | +| `createFlowEngine(config)` | Create a flow agent factory with shared configuration and custom steps. | ## Documentation -For comprehensive documentation, see [docs/overview.md](docs/overview.md). +For comprehensive documentation, see the [Agents concept](/concepts/agents) and [`agent()` reference](/reference/agents/agent). ## License -[MIT](../../LICENSE) +[MIT](https://github.com/joggrdocs/funkai/blob/main/LICENSE) diff --git a/packages/agents/docs/advanced/streaming.md b/packages/agents/docs/advanced/streaming.md index b6aab4b..ec94e97 100644 --- a/packages/agents/docs/advanced/streaming.md +++ b/packages/agents/docs/advanced/streaming.md @@ -106,7 +106,7 @@ while (true) { ### Basic Streaming ```ts -const result = await myAgent.stream("Tell me a story"); +const result = await myAgent.stream({ prompt: "Tell me a story" }); if (!result.ok) { console.error(result.error.message); @@ -127,7 +127,7 @@ const finalOutput = await result.output; ```ts import { match } from "ts-pattern"; -const result = await myAgent.stream("Search and summarize"); +const result = await myAgent.stream({ prompt: "Search and summarize" }); if (!result.ok) return; for await (const part of result.fullStream) { @@ -188,7 +188,7 @@ if (result.ok) { Errors in the stream can appear as `StreamPart` events or as rejected promises on the result fields: ```ts -const result = await myAgent.stream("Generate content"); +const result = await myAgent.stream({ prompt: "Generate content" }); if (!result.ok) { console.error("Failed to start stream:", result.error.message); return; @@ -215,7 +215,8 @@ Pass an `AbortSignal` to cancel streaming: ```ts const 
controller = new AbortController(); -const result = await myAgent.stream("Long generation", { +const result = await myAgent.stream({ + prompt: "Long generation", signal: controller.signal, }); @@ -234,4 +235,4 @@ if (result.ok) { - [Agent](../core/agent.md) - [Core Types](../core/types.md) -- [Workflow](../core/workflow.md) +- [Flow Agent](../core/flow-agent.md) diff --git a/packages/agents/docs/core/agent.md b/packages/agents/docs/core/agent.md index 462c358..8a7160b 100644 --- a/packages/agents/docs/core/agent.md +++ b/packages/agents/docs/core/agent.md @@ -131,13 +131,13 @@ Agents declared in the `agents` field are automatically wrapped as tools that th ```ts const researcher = agent({ name: "researcher", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You research topics thoroughly.", }); const writer = agent({ name: "writer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are a technical writer. Delegate research to the researcher agent.", agents: { researcher }, }); @@ -150,11 +150,11 @@ const writer = agent({ ```ts const helper = agent({ name: "helper", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are a helpful assistant.", }); -const result = await helper.generate("What is TypeScript?"); +const result = await helper.generate({ prompt: "What is TypeScript?" 
}); if (result.ok) { console.log(result.output); // string } @@ -165,7 +165,7 @@ if (result.ok) { ```ts const summarizer = agent({ name: "summarizer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Summarize the following:\n\n${input.text}`, }); @@ -184,7 +184,7 @@ const search = tool({ const assistant = agent({ name: "assistant", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are a helpful assistant with web search.", tools: { search }, }); @@ -195,7 +195,7 @@ const assistant = agent({ ```ts const analyst = agent({ name: "analyst", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You analyze data. Delegate searches to the searcher.", agents: { searcher: searchAgent }, }); @@ -204,9 +204,9 @@ const analyst = agent({ ### Streaming ```ts -const result = await helper.stream("Tell me a story"); +const result = await helper.stream({ prompt: "Tell me a story" }); if (result.ok) { - for await (const chunk of result.stream) { + for await (const chunk of result.fullStream) { process.stdout.write(chunk); } const finalOutput = await result.output; @@ -216,8 +216,9 @@ if (result.ok) { ### Inline overrides ```ts -const result = await helper.generate("Explain quantum computing", { - model: "anthropic/claude-sonnet-4", +const result = await helper.generate({ + prompt: "Explain quantum computing", + model: anthropic("claude-sonnet-4-20250514"), maxSteps: 5, onFinish: ({ duration }) => console.log(`Took ${duration}ms`), }); diff --git a/packages/agents/docs/core/workflow.md b/packages/agents/docs/core/flow-agent.md similarity index 67% rename from packages/agents/docs/core/workflow.md rename to packages/agents/docs/core/flow-agent.md index 2d7a29e..b65bf0d 100644 --- a/packages/agents/docs/core/workflow.md +++ b/packages/agents/docs/core/flow-agent.md @@ -1,34 +1,34 @@ -# workflow() +# flowAgent() -`workflow()` creates a `Workflow` from a configuration object and an imperative 
handler function. The handler IS the workflow -- no step arrays, no definition objects. State is just variables. `$` is passed in for tracked operations. +`flowAgent()` creates a `FlowAgent` from a configuration object and an imperative handler function. The handler IS the flow agent -- no step arrays, no definition objects. State is just variables. `$` is passed in for tracked operations. ## Signature ```ts -function workflow( - config: WorkflowConfig, - handler: WorkflowHandler, -): Workflow; +function flowAgent( + config: FlowAgentConfig, + handler: FlowAgentHandler, +): FlowAgent; ``` -## WorkflowConfig +## FlowAgentConfig -| Field | Required | Type | Description | -| -------------- | -------- | --------------------------------------------------------------- | ------------------------------------------- | -| `name` | Yes | `string` | Unique workflow name (used in logs, traces) | -| `input` | Yes | `ZodType` | Zod schema for validating input | -| `output` | Yes | `ZodType` | Zod schema for validating output | -| `logger` | No | `Logger` | Pino-compatible logger | -| `onStart` | No | `(event: { input }) => void \| Promise` | Hook: fires when the workflow starts | -| `onFinish` | No | `(event: { input, output, duration }) => void \| Promise` | Hook: fires on success | -| `onError` | No | `(event: { input, error }) => void \| Promise` | Hook: fires on error | -| `onStepStart` | No | `(event: { step: StepInfo }) => void \| Promise` | Hook: fires when any `$` step starts | -| `onStepFinish` | No | `(event: { step, result, duration }) => void \| Promise` | Hook: fires when any `$` step finishes | +| Field | Required | Type | Description | +| -------------- | -------- | --------------------------------------------------------------- | --------------------------------------------- | +| `name` | Yes | `string` | Unique flow agent name (used in logs, traces) | +| `input` | Yes | `ZodType` | Zod schema for validating input | +| `output` | Yes | `ZodType` | Zod schema for 
validating output | +| `logger` | No | `Logger` | Pino-compatible logger | +| `onStart` | No | `(event: { input }) => void \| Promise` | Hook: fires when the flow agent starts | +| `onFinish` | No | `(event: { input, output, duration }) => void \| Promise` | Hook: fires on success | +| `onError` | No | `(event: { input, error }) => void \| Promise` | Hook: fires on error | +| `onStepStart` | No | `(event: { step: StepInfo }) => void \| Promise` | Hook: fires when any `$` step starts | +| `onStepFinish` | No | `(event: { step, result, duration }) => void \| Promise` | Hook: fires when any `$` step finishes | -## WorkflowHandler +## FlowAgentHandler ```ts -type WorkflowHandler = (params: WorkflowParams) => Promise; +type FlowAgentHandler = (params: FlowAgentParams) => Promise; ``` The handler receives `{ input, $ }`: @@ -38,22 +38,31 @@ The handler receives `{ input, $ }`: The handler returns `TOutput`, which is validated against the `output` Zod schema before being returned to the caller. -## Workflow Interface +## FlowAgent Interface ```ts -interface Workflow { - generate(input: TInput, config?: WorkflowOverrides): Promise>>; - stream(input: TInput, config?: WorkflowOverrides): Promise>>; - fn(): (input: TInput, config?: WorkflowOverrides) => Promise>>; +interface FlowAgent { + generate( + input: TInput, + config?: FlowAgentOverrides, + ): Promise>>; + stream( + input: TInput, + config?: FlowAgentOverrides, + ): Promise>>; + fn(): ( + input: TInput, + config?: FlowAgentOverrides, + ) => Promise>>; } ``` ### generate() -Runs the workflow to completion. Returns `Result>`. +Runs the flow agent to completion. Returns `Result>`. 
```ts -interface WorkflowResult { +interface FlowAgentGenerateResult { output: TOutput; // validated output trace: readonly TraceEntry[]; // frozen execution trace tree usage: TokenUsage; // aggregated token usage from all $.agent() calls @@ -65,10 +74,10 @@ On success, `result.ok` is `true` and `output`, `trace`, `duration` are flat on ### stream() -Runs the workflow with streaming step progress. Returns `Result>`. +Runs the flow agent with streaming step progress. Returns `Result>`. ```ts -interface WorkflowStreamResult { +interface FlowAgentStreamResult { output: TOutput; // available after stream completes trace: readonly TraceEntry[]; // available after stream completes usage: TokenUsage; // aggregated token usage (available after stream completes) @@ -81,20 +90,20 @@ Subscribe to `stream` for real-time step progress events. ### StepEvent -Events emitted on the workflow stream: +Events emitted on the flow agent stream: -| Type | Fields | Description | -| ----------------- | ---------------------------- | ------------------------- | -| `step:start` | `step: StepInfo` | A `$` operation started | -| `step:finish` | `step`, `result`, `duration` | A `$` operation completed | -| `step:error` | `step`, `error` | A `$` operation failed | -| `workflow:finish` | `output`, `duration` | The workflow completed | +| Type | Fields | Description | +| ------------- | ---------------------------- | ------------------------- | +| `step:start` | `step: StepInfo` | A `$` operation started | +| `step:finish` | `step`, `result`, `duration` | A `$` operation completed | +| `step:error` | `step`, `error` | A `$` operation failed | +| `flow:finish` | `output`, `duration` | The flow agent completed | ### fn() Returns a plain function with the same signature as `.generate()`. Use for clean single-function exports. -## WorkflowOverrides +## FlowAgentOverrides Per-call overrides passed as the optional second parameter to `.generate()` or `.stream()`. 
@@ -104,28 +113,28 @@ Per-call overrides passed as the optional second parameter to `.generate()` or ` When the signal fires, all in-flight `$` operations check `signal.aborted` and abort. The signal propagates through the entire execution tree. -## createWorkflowEngine() +## createFlowEngine() -For custom step types, use `createWorkflowEngine()`. It returns a `workflow()`-like factory with custom methods added to `$`. +For custom step types, use `createFlowEngine()`. It returns a `flowAgent()`-like factory with custom methods added to `$`. ```ts -function createWorkflowEngine( - config: EngineConfig, -): WorkflowFactory; +function createFlowEngine( + config: FlowEngineConfig, +): FlowFactory; ``` -### EngineConfig +### FlowEngineConfig -| Field | Type | Description | -| -------------- | ----------------------- | ------------------------------- | -| `$` | `CustomStepDefinitions` | Custom step types to add to `$` | -| `onStart` | hook | Default hook for all workflows | -| `onFinish` | hook | Default hook for all workflows | -| `onError` | hook | Default hook for all workflows | -| `onStepStart` | hook | Default hook for all workflows | -| `onStepFinish` | hook | Default hook for all workflows | +| Field | Type | Description | +| -------------- | ----------------------- | -------------------------------- | +| `$` | `CustomStepDefinitions` | Custom step types to add to `$` | +| `onStart` | hook | Default hook for all flow agents | +| `onFinish` | hook | Default hook for all flow agents | +| `onError` | hook | Default hook for all flow agents | +| `onStepStart` | hook | Default hook for all flow agents | +| `onStepFinish` | hook | Default hook for all flow agents | -Engine-level hooks fire first, then workflow-level hooks fire second. +Engine-level hooks fire first, then flow agent-level hooks fire second. 
Each custom step factory receives `{ ctx: ExecutionContext, config }` where `ExecutionContext` provides the abort signal and scoped logger: @@ -139,7 +148,7 @@ type CustomStepFactory = (params: { ### Example ```ts -const engine = createWorkflowEngine({ +const engine = createFlowEngine({ $: { retry: async ({ ctx, config }) => { let lastError: Error | undefined; @@ -159,9 +168,9 @@ const engine = createWorkflowEngine({ onFinish: ({ output, duration }) => telemetry.trackFinish(output, duration), }); -const myWorkflow = engine( +const myFlowAgent = engine( { - name: "my-workflow", + name: "my-flow-agent", input: MyInput, output: MyOutput, }, @@ -180,12 +189,13 @@ const myWorkflow = engine( ## Full Example ```ts -import { workflow, agent, tool } from "@joggr/agent-sdk"; +import { flowAgent, agent, tool } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const analyzeAgent = agent({ name: "analyzer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ files: z.array(z.string()) }), prompt: ({ input }) => `Analyze these files:\n${input.files.join("\n")}`, }); @@ -193,7 +203,7 @@ const analyzeAgent = agent({ const InputSchema = z.object({ repo: z.string() }); const OutputSchema = z.object({ report: z.string(), fileCount: z.number() }); -const reporter = workflow( +const reporter = flowAgent( { name: "reporter", input: InputSchema, @@ -232,4 +242,4 @@ const result = await reporter.generate({ repo: "my-org/my-repo" }); - [Core Overview](overview.md) - [Step Builder ($)](step.md) - [Hooks](hooks.md) -- [Guide: Create a Workflow](../guides/create-workflow.md) +- [Guide: Create a Flow Agent](../guides/create-flow-agent.md) diff --git a/packages/agents/docs/core/hooks.md b/packages/agents/docs/core/hooks.md index 930ba01..929c060 100644 --- a/packages/agents/docs/core/hooks.md +++ b/packages/agents/docs/core/hooks.md @@ -1,6 +1,6 @@ # Hooks -Hooks provide lifecycle callbacks for agents, workflows, and steps. 
All hooks are optional. Hook errors are swallowed (logged via `attemptEachAsync`, never thrown) so they never mask the original error or interrupt execution. +Hooks provide lifecycle callbacks for agents, flow agents, and steps. All hooks are optional. Hook errors are swallowed (logged via `attemptEachAsync`, never thrown) so they never mask the original error or interrupt execution. ## Agent Hooks @@ -13,9 +13,9 @@ Set on `AgentConfig`: | `onError` | `{ input, error }` | On error, before Result is returned | | `onStepFinish` | `{ stepId }` | After each tool-loop step (counter-based: `agentName:0`, `agentName:1`, ...) | -## Workflow Hooks +## Flow Agent Hooks -Set on `WorkflowConfig`: +Set on `FlowAgentConfig`: | Hook | Event fields | When | | -------------- | -------------------------------------- | ----------------------------------------------------- | @@ -44,7 +44,8 @@ These are available on `$.step`, `$.agent`, `$.map`, `$.each`, `$.reduce`, `$.wh Agent per-call hooks are set on `AgentOverrides` (the second parameter to `.generate()` or `.stream()`). They have the same names as the base hooks but fire **after** the base hooks. ```ts -await myAgent.generate("hello", { +await myAgent.generate({ + prompt: "hello", onStart: ({ input }) => console.log("call-level start"), onFinish: ({ result, duration }) => console.log(`call done in ${duration}ms`), }); @@ -54,20 +55,20 @@ await myAgent.generate("hello", { Per-call hooks merge with base hooks -- base fires first, then call-level. Both are independently wrapped with `attemptEachAsync`, so an error in one hook does not prevent the other from running. -For workflow engines created with `createWorkflowEngine()`, engine-level hooks fire first, then workflow-level hooks fire second. +For flow engines created with `createFlowEngine()`, engine-level hooks fire first, then flow agent-level hooks fire second. 
## Hook Execution Order -For a `$.agent` call inside a workflow: +For a `$.agent` call inside a flow agent: ``` -step.onStart -> workflow.onStepStart -> execute -> step.onFinish -> workflow.onStepFinish +step.onStart -> flowAgent.onStepStart -> execute -> step.onFinish -> flowAgent.onStepFinish ``` On error, the sequence diverges: ``` -step.onStart -> workflow.onStepStart -> execute (throws) -> step.onError -> workflow.onStepFinish +step.onStart -> flowAgent.onStepStart -> execute (throws) -> step.onError -> flowAgent.onStepFinish ``` For an agent's tool-loop steps: @@ -150,5 +151,5 @@ This means a failing hook will never mask the original error or prevent other ho ## References - [Agent](agent.md) -- [Workflow](workflow.md) +- [Flow Agent](flow-agent.md) - [Step Builder ($)](step.md) diff --git a/packages/agents/docs/core/overview.md b/packages/agents/docs/core/overview.md index 26d0bd8..9a97d0f 100644 --- a/packages/agents/docs/core/overview.md +++ b/packages/agents/docs/core/overview.md @@ -1,6 +1,6 @@ # Core Concepts -The core module provides the fundamental building blocks: `agent()`, `workflow()`, `tool()`, and the `$` step builder. All public operations return `Result` so callers never need try/catch. +The core module provides the fundamental building blocks: `agent()`, `flowAgent()`, `tool()`, and the `$` step builder. All public operations return `Result` so callers never need try/catch. ## Result Type @@ -11,7 +11,7 @@ type Result = (T & { ok: true }) | { ok: false; error: ResultError }; Success fields are **flat on the object** -- no `.value` wrapper. Callers pattern-match on `ok`: ```ts -const result = await myAgent.generate("hello"); +const result = await myAgent.generate({ prompt: "hello" }); if (!result.ok) { console.error(result.error.code, result.error.message); @@ -29,7 +29,7 @@ Helper constructors and type guards are exported: `ok()`, `err()`, `isOk()`, `is ## Context -The framework creates an internal `Context` for each workflow execution. 
Users never create or pass this directly. +The framework creates an internal `Context` for each flow agent execution. Users never create or pass this directly. ```ts interface ExecutionContext { @@ -38,7 +38,7 @@ interface ExecutionContext { } ``` -`ExecutionContext` is the public subset exposed to custom step factories via `createWorkflowEngine()`. The internal `Context` extends it with a mutable `trace: TraceEntry[]` array for recording the execution graph. +`ExecutionContext` is the public subset exposed to custom step factories via `createFlowEngine()`. The internal `Context` extends it with a mutable `trace: TraceEntry[]` array for recording the execution graph. ## Logger @@ -56,7 +56,7 @@ interface Logger { Each level also supports pino's object-first overload: `log.info({ key: 'val' }, 'message')`. -The framework calls `child()` at scope boundaries (workflow, step, agent) so log output automatically includes execution context (`workflowId`, `stepId`, `agentId`). When no logger is injected, `createDefaultLogger()` provides a console-based fallback. +The framework calls `child()` at scope boundaries (flow agent, step, agent) so log output automatically includes execution context (`flowAgentId`, `stepId`, `agentId`). When no logger is injected, `createDefaultLogger()` provides a console-based fallback. ## TraceEntry @@ -76,12 +76,12 @@ interface TraceEntry { } ``` -The trace is exposed on `WorkflowResult.trace` as a frozen (immutable) snapshot after workflow completion. +The trace is exposed on `FlowAgentGenerateResult.trace` as a frozen (immutable) snapshot after flow agent completion. 
## References - [Agent](agent.md) -- [Workflow](workflow.md) +- [Flow Agent](flow-agent.md) - [Step Builder ($)](step.md) - [Tools](tools.md) - [Hooks](hooks.md) diff --git a/packages/agents/docs/core/step.md b/packages/agents/docs/core/step.md index a579781..77c519b 100644 --- a/packages/agents/docs/core/step.md +++ b/packages/agents/docs/core/step.md @@ -1,6 +1,6 @@ # $ StepBuilder -The `$` object is passed into every workflow handler and step callback. It provides tracked operations that register data flow in the execution trace. Every call through `$` becomes a `TraceEntry`. +The `$` object is passed into every flow agent handler and step callback. It provides tracked operations that register data flow in the execution trace. Every call through `$` becomes a `TraceEntry`. `$` is passed into every callback, enabling composition and nesting. You can always skip `$` and use plain imperative code -- it just will not appear in the trace. @@ -19,7 +19,7 @@ type StepResult = ```ts interface StepInfo { id: string; // from the $ config's `id` field - index: number; // auto-incrementing within the workflow + index: number; // auto-incrementing within the flow agent type: OperationType; // 'step' | 'agent' | 'map' | 'each' | 'reduce' | 'while' | 'all' | 'race' } ``` @@ -66,7 +66,7 @@ $.agent(config: AgentStepConfig): Promise` | The agent (or workflow) to invoke | +| `agent` | Yes | `Runnable` | The agent (or flow agent) to invoke | | `input` | Yes | `TInput` | Input to pass to the agent | | `config` | No | `AgentOverrides` | Inline overrides for this agent call | | `onStart` | No | hook | Hook: fires when step starts | @@ -274,6 +274,6 @@ const result = await $.step({ ## References -- [Workflow](workflow.md) +- [Flow Agent](flow-agent.md) - [Hooks](hooks.md) - [Core Overview](overview.md) diff --git a/packages/agents/docs/core/tools.md b/packages/agents/docs/core/tools.md index 72990fb..8e39851 100644 --- a/packages/agents/docs/core/tools.md +++ 
b/packages/agents/docs/core/tools.md @@ -33,6 +33,7 @@ type Tool = ReturnType r.output) diff --git a/packages/agents/docs/cost-tracking.md b/packages/agents/docs/cost-tracking.md new file mode 100644 index 0000000..823f550 --- /dev/null +++ b/packages/agents/docs/cost-tracking.md @@ -0,0 +1,427 @@ +# Cost Tracking + +Track token usage, calculate costs, enforce budgets, and optimize model selection for cost-efficient agents and flow agents. + +## Prerequisites + +- `@funkai/agents` installed +- `@funkai/models` installed (provides `calculateCost`, `model`, `models`) + +## Track usage per agent call + +Every successful `agent.generate()` returns `result.usage` with resolved token counts. All fields are `number` (0 when the provider does not report a field). + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; + +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", +}); + +const result = await helper.generate({ prompt: "What is TypeScript?" }); + +if (result.ok) { + console.log("Input tokens:", result.usage.inputTokens); + console.log("Output tokens:", result.usage.outputTokens); + console.log("Total tokens:", result.usage.totalTokens); + console.log("Cache read:", result.usage.cacheReadTokens); + console.log("Cache write:", result.usage.cacheWriteTokens); + console.log("Reasoning:", result.usage.reasoningTokens); +} +``` + +## Calculate cost with `@funkai/models` + +Use `calculateCost()` to convert token counts into USD amounts. Look up model pricing with `model()`. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { calculateCost, model } from "@funkai/models"; + +const summarizer = agent({ + name: "summarizer", + model: openai("gpt-4.1"), + system: "You produce concise summaries.", +}); + +const result = await summarizer.generate({ prompt: "Summarize the history of TypeScript." 
}); + +if (result.ok) { + const modelDef = model("gpt-4.1"); + if (!modelDef) { + return; + } + + const cost = calculateCost(result.usage, modelDef.pricing); + + console.log("Input cost:", `$${cost.input.toFixed(6)}`); + console.log("Output cost:", `$${cost.output.toFixed(6)}`); + console.log("Cache read cost:", `$${cost.cacheRead.toFixed(6)}`); + console.log("Cache write cost:", `$${cost.cacheWrite.toFixed(6)}`); + console.log("Total cost:", `$${cost.total.toFixed(6)}`); +} +``` + +## Enforce budget limits with hooks + +Use the `onFinish` hook to track cumulative cost and warn when a budget is exceeded. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { calculateCost, model } from "@funkai/models"; + +const modelDef = model("gpt-4.1"); + +if (!modelDef) { + throw new Error("Unknown model: gpt-4.1"); +} + +let cumulativeCost = 0; +const budgetLimit = 0.5; // $0.50 + +const helper = agent({ + name: "budget-helper", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", + onFinish: ({ result }) => { + const cost = calculateCost(result.usage, modelDef.pricing); + cumulativeCost += cost.total; + console.log(`Cost: $${cost.total.toFixed(6)} | Cumulative: $${cumulativeCost.toFixed(6)}`); + + if (cumulativeCost > budgetLimit) { + console.warn(`Budget exceeded: $${cumulativeCost.toFixed(4)} > $${budgetLimit}`); + } + }, +}); +``` + +Hooks are observability callbacks — they cannot abort execution. To enforce a hard budget, check cumulative cost before each call and skip or abort manually using an `AbortController`. + +## Switch models based on task complexity + +Use per-call overrides to select cheaper models for simple tasks and more capable models for complex ones. 
+ +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; +import type { LanguageModel } from "@funkai/models"; + +const assistant = agent({ + name: "smart-assistant", + model: openai("gpt-4.1"), + input: z.object({ + question: z.string(), + complexity: z.enum(["simple", "complex"]), + }), + prompt: ({ input }) => input.question, +}); + +const selectModel = (complexity: "simple" | "complex"): LanguageModel => + complexity === "simple" ? openai("gpt-4.1-mini") : openai("gpt-4.1"); + +const result = await assistant.generate({ + input: { question: "What is 2 + 2?", complexity: "simple" }, + model: selectModel("simple"), +}); +``` + +## Compare model costs before selecting + +Use the `models()` function to list available models and compare pricing. + +```ts +import { models } from "@funkai/models"; + +const allModels = models(); +const sorted = [...allModels].sort((a, b) => a.pricing.input - b.pricing.input); + +console.log("Cheapest models by input cost:"); +for (const m of sorted.slice(0, 5)) { + console.log(` ${m.id}: $${(m.pricing.input * 1_000_000).toFixed(2)}/M input tokens`); +} +``` + +## Aggregate flow agent cost + +Flow agent results include `result.usage` with aggregated token counts from all `$.agent()` calls. Combine with `calculateCost()` for the total flow agent cost. 
+
+```ts
+import { flowAgent, agent } from "@funkai/agents";
+import { openai } from "@ai-sdk/openai";
+import { calculateCost, model } from "@funkai/models";
+import { z } from "zod";
+
+const analyzer = agent({
+  name: "analyzer",
+  model: openai("gpt-4.1"),
+  input: z.object({ text: z.string() }),
+  prompt: ({ input }) => `Analyze:\n\n${input.text}`,
+});
+
+const pipeline = flowAgent(
+  {
+    name: "analysis-pipeline",
+    input: z.object({ texts: z.array(z.string()) }),
+    output: z.object({
+      analyses: z.array(z.string()),
+      totalTokens: z.number(),
+    }),
+  },
+  async ({ input, $ }) => {
+    const results = await $.map({
+      id: "analyze-all",
+      input: input.texts,
+      concurrency: 3,
+      execute: async ({ item, index, $ }) => {
+        const result = await $.agent({
+          id: `analyze-${index}`,
+          agent: analyzer,
+          input: { text: item },
+        });
+        return {
+          analysis: result.ok ? result.value.output : "Analysis failed",
+          tokens: result.ok ? result.value.usage.totalTokens : 0,
+        };
+      },
+    });
+
+    const totalTokens = results.ok ? results.value.reduce((sum, r) => sum + r.tokens, 0) : 0;
+
+    return {
+      analyses: results.ok ? results.value.map((r) => r.analysis) : [],
+      totalTokens,
+    };
+  },
+);
+
+const result = await pipeline.generate({ texts: ["Text A", "Text B", "Text C"] });
+
+if (result.ok) {
+  const modelDef = model("gpt-4.1");
+
+  if (modelDef) {
+    const cost = calculateCost(result.usage, modelDef.pricing);
+    console.log(`Flow agent total: ${result.usage.totalTokens} tokens, $${cost.total.toFixed(6)}`);
+  }
+}
+```
+
+## Log per-step costs in flow agents
+
+Use `onStepFinish` to calculate and log the cost of each agent step as it completes.
+ +```ts +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { calculateCost, model } from "@funkai/models"; +import { z } from "zod"; + +const modelDef = model("gpt-4.1"); + +if (!modelDef) { + throw new Error("Unknown model: gpt-4.1"); +} + +const writer = agent({ + name: "writer", + model: openai("gpt-4.1"), + input: z.object({ topic: z.string() }), + prompt: ({ input }) => `Write about: ${input.topic}`, +}); + +const traced = flowAgent( + { + name: "cost-traced", + input: z.object({ topics: z.array(z.string()) }), + output: z.object({ articles: z.array(z.string()) }), + onStepFinish: ({ step, result, duration }) => { + if (result !== undefined && "usage" in result && result.usage) { + const cost = calculateCost(result.usage, modelDef.pricing); + console.log( + `[${step.id}] ${result.usage.totalTokens} tokens, $${cost.total.toFixed(6)}, ${duration}ms`, + ); + } + }, + }, + async ({ input, $ }) => { + const articles = await $.map({ + id: "write-all", + input: input.topics, + concurrency: 2, + execute: async ({ item, index, $ }) => { + const result = await $.agent({ + id: `write-${index}`, + agent: writer, + input: { topic: item }, + }); + return result.ok ? result.value.output : ""; + }, + }); + + return { articles: articles.ok ? articles.value : [] }; + }, +); +``` + +## Collect usage from the trace + +Use `collectUsages()` to recursively extract all `TokenUsage` values from a trace tree and compose with `usage()`, `usageByAgent()`, or `usageByModel()` for flexible aggregation. + +### `collectUsages()` + +Walks a `TraceEntry[]` tree and collects all `usage` values into a flat array (recursively including children). Entries without `usage` are skipped. 
+ +```ts +import { collectUsages, usage } from "@funkai/agents"; + +const result = await myFlowAgent.generate({ input }); +if (result.ok) { + const total = usage(collectUsages(result.trace)); + console.log(total.inputTokens, total.outputTokens, total.totalTokens); +} +``` + +### `usage()` — sum all records + +Sum all token usage records into a single flat `TokenUsage`: + +```ts +import { usage, collectUsages } from "@funkai/agents"; + +const result = await myFlowAgent.generate({ input }); +if (result.ok) { + const total = usage(collectUsages(result.trace)); + console.log(total.inputTokens, total.outputTokens, total.totalTokens); +} +``` + +### `usageByAgent()` — group by agent + +Group records by agent ID and compute per-agent usage: + +```ts +import { usageByAgent, collectUsages } from "@funkai/agents"; + +const result = await myFlowAgent.generate({ input }); +if (result.ok) { + const byAgent = usageByAgent(collectUsages(result.trace)); + for (const entry of byAgent) { + console.log(`${entry.source.agentId}: ${entry.totalTokens} tokens`); + } +} +``` + +### `usageByModel()` — group by model + +Group records by model ID and compute per-model usage: + +```ts +import { usageByModel, collectUsages } from "@funkai/agents"; + +const result = await myFlowAgent.generate({ input }); +if (result.ok) { + const byModel = usageByModel(collectUsages(result.trace)); + for (const entry of byModel) { + console.log(`${entry.modelId}: ${entry.totalTokens} tokens`); + } +} +``` + +### Compute cost from trace + +Combine `collectUsages()` with `calculateCost()` to aggregate cost across the full trace: + +```ts +import { collectUsages } from "@funkai/agents"; +import { calculateCost, model } from "@funkai/models"; + +const result = await myFlowAgent.generate({ input }); + +if (result.ok) { + const usages = collectUsages(result.trace); + const m = model("gpt-4.1"); + + if (m) { + const totalCost = usages.reduce((sum, u) => { + const cost = calculateCost(u, m.pricing); + return sum + cost.total; + }, 0); + 
console.log(`Total cost: $${totalCost.toFixed(4)}`); + } +} +``` + +--- + +## Reference: TokenUsageRecord + +Raw tracking record from a single model invocation. Fields are `number | undefined` because not all providers report every field. + +| Field | Type | Description | +| ------------------ | --------------------- | ---------------------------------------- | +| `modelId` | `string` | Model ID | +| `inputTokens` | `number \| undefined` | Input (prompt) tokens | +| `outputTokens` | `number \| undefined` | Output (completion) tokens | +| `totalTokens` | `number \| undefined` | Input + output | +| `cacheReadTokens` | `number \| undefined` | Tokens served from provider prompt cache | +| `cacheWriteTokens` | `number \| undefined` | Tokens written to prompt cache | +| `reasoningTokens` | `number \| undefined` | Internal reasoning tokens (e.g. o3/o4) | +| `source` | `object \| undefined` | Framework-populated source info | + +The `source` field identifies which component produced the record: + +```ts +source?: { + flowAgentId?: string; + stepId?: string; + agentId: string; + scope: string[]; +} +``` + +## Reference: TokenUsage (resolved) + +The aggregated output type. All fields are resolved `number` (0 when the raw record was `undefined`). + +| Field | Type | Description | +| ------------------ | -------- | ------------------------- | +| `inputTokens` | `number` | Total input tokens | +| `outputTokens` | `number` | Total output tokens | +| `totalTokens` | `number` | Input + output | +| `cacheReadTokens` | `number` | Cached input tokens | +| `cacheWriteTokens` | `number` | Cache write tokens | +| `reasoningTokens` | `number` | Internal reasoning tokens | + +--- + +## Troubleshooting + +### Token counts are all zero + +Not all providers report all token fields. Check `result.usage` directly. Unreported fields default to `0`. + +### `model()` returns `undefined` for unknown model ID + +Use the provider-native ID without the provider prefix (e.g. `"gpt-4.1"` not `"openai/gpt-4.1"`). 
Run `pnpm --filter=@funkai/models generate:models` to refresh the catalog if the model was recently added. + +### Budget hook does not prevent the next call + +Hooks are observability callbacks — they cannot abort execution. To enforce a hard budget, check the cumulative cost before each call and skip or abort manually using an `AbortController`. + +### Flow agent usage does not include non-agent steps + +`result.usage` only includes tokens from `$.agent()` calls. This is by design. Pure computation steps (`$.step`, `$.map` with non-agent logic) do not consume tokens. + +--- + +## See also + +- [`agent()` reference](/reference/agents/agent) +- [`flowAgent()` reference](/reference/agents/flow-agent) +- [`calculateCost()` reference](/reference/models/calculate-cost) diff --git a/packages/agents/docs/create-agent.md b/packages/agents/docs/create-agent.md new file mode 100644 index 0000000..b212d0e --- /dev/null +++ b/packages/agents/docs/create-agent.md @@ -0,0 +1,346 @@ +# Create an Agent + +`agent()` creates an `Agent` that wraps the AI SDK's tool loop (`generateText`/`streamText`) with typed input, subagents, hooks, and `Result` return types. + +## Simple agent + +Pass a `name`, `model`, and optional `system` prompt. In simple mode, `.generate()` accepts a `{ prompt }` object or a `Message[]`. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; + +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", +}); + +const result = await helper.generate({ prompt: "What is TypeScript?" }); +if (result.ok) { + console.log(result.output); // string +} +``` + +On success, `result.ok` is `true` and `result.output`, `result.messages`, and `result.usage` are available. On failure, `result.ok` is `false` and `result.error` contains a `ResultError`. + +## Typed I/O + +Add an `input` Zod schema and a `prompt` function. 
Both are required together — providing one without the other is a type error. `.generate()` now accepts the typed input via the `input` field. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const summarizer = agent({ + name: "summarizer", + model: openai("gpt-4.1"), + input: z.object({ + text: z.string(), + maxLength: z.number().optional(), + }), + prompt: ({ input }) => + `Summarize the following text${input.maxLength ? ` in under ${input.maxLength} words` : ""}:\n\n${input.text}`, + system: "You produce concise summaries.", +}); + +const result = await summarizer.generate({ + input: { text: "A very long article...", maxLength: 100 }, +}); +``` + +## Tools + +Pass a `tools` record. Tool names come from the object keys. See [Tools](tools.md). + +```ts +import { agent, tool } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const fetchPage = tool({ + description: "Fetch a web page by URL", + inputSchema: z.object({ url: z.url() }), + execute: async ({ url }) => { + const res = await fetch(url); + return { status: res.status, body: await res.text() }; + }, +}); + +const researcher = agent({ + name: "researcher", + model: openai("gpt-4.1"), + system: "You research topics by fetching web pages.", + tools: { fetchPage }, +}); +``` + +## Subagents + +Pass an `agents` record. Each subagent is automatically wrapped as a delegatable tool the parent agent can invoke through function calling. Abort signals propagate from parent to child. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const writer = agent({ + name: "writer", + model: openai("gpt-4.1"), + input: z.object({ topic: z.string() }), + prompt: ({ input }) => `Write an article about ${input.topic}`, +}); + +const editor = agent({ + name: "editor", + model: openai("gpt-4.1"), + system: "You review and improve articles. 
Delegate writing to the writer agent.", + agents: { writer }, +}); +``` + +## Output strategies + +Pass an `output` config to get typed structured output instead of a plain string. Accepts AI SDK `Output` strategies or raw Zod schemas (auto-wrapped). + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { Output } from "ai"; +import { z } from "zod"; + +// Zod schema — auto-wrapped as Output.object() +const classifier = agent({ + name: "classifier", + model: openai("gpt-4.1"), + output: z.object({ + category: z.enum(["bug", "feature", "question"]), + confidence: z.number(), + }), + input: z.object({ title: z.string(), body: z.string() }), + prompt: ({ input }) => `Classify this issue:\n\nTitle: ${input.title}\nBody: ${input.body}`, +}); + +// Output.array() directly +const tagger = agent({ + name: "tagger", + model: openai("gpt-4.1"), + output: Output.array({ element: z.object({ tag: z.string(), score: z.number() }) }), + system: "Extract tags from the text.", +}); +``` + +Accepted output values: + +| Value | Description | +| ---------------------------- | ------------------------------------------- | +| `Output.text()` | Plain string (default when omitted) | +| `Output.object({ schema })` | Validated structured object | +| `Output.array({ element })` | Validated array | +| `Output.choice({ options })` | Enum/classification | +| `z.object({ ... })` | Auto-wrapped as `Output.object({ schema })` | +| `z.array(z.object({ ... }))` | Auto-wrapped as `Output.array({ element })` | + +## Streaming + +Use `.stream()` for incremental text delivery. The result contains `fullStream` (an `AsyncIterableStream`) for live events, plus `output` and `messages` as promises that resolve after the stream completes. 
+ +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; + +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", +}); + +const result = await helper.stream({ prompt: "Explain async/await in detail" }); + +if (result.ok) { + // Consume stream events as they arrive + for await (const part of result.fullStream) { + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } + } + + // Await final output and messages after stream completes + const finalOutput = await result.output; + const messages = await result.messages; +} +``` + +## Per-call overrides + +Override model, system prompt, tools, output, and hooks for a single call without changing the agent definition. Per-call hooks **merge** with base hooks — base fires first, then call-level. + +```ts +import { anthropic } from "@ai-sdk/anthropic"; + +const result = await helper.generate({ + prompt: "Explain monads", + model: anthropic("claude-sonnet-4-20250514"), + system: "You explain concepts using simple analogies.", + maxSteps: 5, + onStart: ({ input }) => console.log("Starting with:", input), + onFinish: ({ result, duration }) => console.log(`Done in ${duration}ms`), +}); +``` + +### Cancellation + +Pass an `AbortController` signal via per-call overrides. + +```ts +const controller = new AbortController(); +setTimeout(() => controller.abort(), 10_000); + +const result = await helper.generate({ + prompt: "Explain quantum computing", + signal: controller.signal, +}); + +if (!result.ok) { + console.error(result.error.code); // 'AGENT_ERROR' +} +``` + +## Export as a plain function + +Use `.fn()` for clean single-function exports. The returned function has the same signature as `.generate()`. 
+ +```ts +export const summarize = summarizer.fn(); + +// Callers use it like a regular async function +const result = await summarize({ input: { text: "...", maxLength: 50 } }); +``` + +--- + +## Reference: `agent()` signature + +```ts +function agent( + config: AgentConfig, +): Agent; +``` + +## Reference: AgentConfig + +| Field | Required | Type | Description | +| -------------- | -------- | ------------------------------------------------------------------------------------ | ----------------------------------------------------------- | +| `name` | Yes | `string` | Unique agent name (used in logs, traces, hooks) | +| `model` | Yes | `Resolver` | Model instance or resolver function | +| `input` | No | `ZodType` | Zod schema for typed input (requires `prompt`) | +| `prompt` | No | `(params: { input: TInput }) => string \| Message[] \| Promise` | Render typed input into the model prompt (requires `input`) | +| `system` | No | `Resolver` | System prompt (static string or resolver function) | +| `tools` | No | `Resolver` | Tools for function calling | +| `agents` | No | `Resolver` | Subagents, auto-wrapped as callable tools | +| `maxSteps` | No | `Resolver` | Max tool-loop iterations (default: `20`) | +| `output` | No | `OutputParam` | Output type strategy | +| `logger` | No | `Resolver` | Pino-compatible logger | +| `onStart` | No | `(event: { input }) => void \| Promise` | Hook: fires when the agent starts | +| `onFinish` | No | `(event: { input, result, duration }) => void \| Promise` | Hook: fires on success | +| `onError` | No | `(event: { input, error }) => void \| Promise` | Hook: fires on error | +| `onStepFinish` | No | `(event: { stepId }) => void \| Promise` | Hook: fires after each tool-loop step | + +### Two modes + +| Config | `.generate()` params | How prompt is built | +| ---------------------- | ----------------------------------------------------------------------------- | ------------------------------ | +| `input` + `prompt` set | `{ input: 
TInput, ...overrides }` | `prompt({ input })` renders it | +| Both omitted | `{ prompt: string, ...overrides }` or `{ messages: Message[], ...overrides }` | Passed directly to the model | + +## Reference: Agent interface + +```ts +interface Agent<TInput, TOutput, TModel> { + readonly model: TModel; + generate( + params: GenerateParams<TInput, TOutput>, + ): Promise<Result<GenerateResult<TOutput>>>; + stream( + params: GenerateParams<TInput, TOutput>, + ): Promise<Result<StreamResult<TOutput>>>; + fn(): ( + params: GenerateParams<TInput, TOutput>, + ) => Promise<Result<GenerateResult<TOutput>>>; +} +``` + +`GenerateParams` combines input and per-call overrides into a single object. Input is specified via exactly one of `prompt`, `messages`, or `input`. + +## Reference: GenerateResult + +```ts +interface GenerateResult<TOutput> { + output: TOutput; // the generation output + messages: Message[]; // full message history including tool calls + usage: TokenUsage; // aggregated token usage across all tool-loop steps + finishReason: string; // why the model stopped ('stop', 'length', 'tool-calls', etc.) +} +``` + +## Reference: StreamResult + +```ts +interface StreamResult<TOutput> { + output: Promise<TOutput>; // resolves after stream completes + messages: Promise<Message[]>; // resolves after stream completes + usage: Promise<TokenUsage>; // resolves after stream completes + finishReason: Promise<string>; // resolves after stream completes + fullStream: AsyncIterableStream<TextStreamPart>; // live stream events + toTextStreamResponse(init?: ResponseInit): Response; // SSE text stream + toUIMessageStreamResponse(options?: unknown): Response; // UI message stream +} +``` + +## Reference: Per-Call Overrides (GenerateParams) + +Per-call overrides are passed as fields in the `GenerateParams` object alongside the input. Override fields replace the base config for that call only. 
+ +| Field | Type | Description | +| -------------- | --------------------------------------------- | ------------------------------- | +| `model` | `Model` | Override the model | +| `system` | `string \| ((params) => string)` | Override the system prompt | +| `tools` | `Partial & Record` | Merge with base tools | +| `agents` | `Partial & Record` | Merge with base subagents | +| `maxSteps` | `number` | Override max tool-loop steps | +| `output` | `OutputParam` | Override the output strategy | +| `signal` | `AbortSignal` | Abort signal for cancellation | +| `timeout` | `number` | Timeout in milliseconds | +| `logger` | `Logger` | Override the logger | +| `onStart` | hook | Per-call hook, fires after base | +| `onFinish` | hook | Per-call hook, fires after base | +| `onError` | hook | Per-call hook, fires after base | +| `onStepFinish` | hook | Per-call hook, fires after base | + +--- + +## Troubleshooting + +### Agent has `input` schema but no `prompt` function + +Provide both `input` and `prompt`, or omit both for simple mode. + +### Agent has `prompt` function but no `input` schema + +Provide both `input` and `prompt`, or omit both for simple mode. + +### Input validation failed + +Check that the input matches the Zod schema. Ensure all required fields are present and types are correct. + +--- + +## See also + +- [Tools](tools.md) +- [Create a Flow Agent](create-flow-agent.md) +- [Cost Tracking](cost-tracking.md) +- [Custom Flow Engine](custom-flow-engine.md) +- [Hooks](hooks.md) +- [Troubleshooting](troubleshooting.md) diff --git a/packages/agents/docs/create-flow-agent.md b/packages/agents/docs/create-flow-agent.md new file mode 100644 index 0000000..3556577 --- /dev/null +++ b/packages/agents/docs/create-flow-agent.md @@ -0,0 +1,542 @@ +# Create a Flow Agent + +`flowAgent()` creates a `FlowAgent` from a configuration object and an imperative handler function. The handler IS the flow agent — no step arrays, no definition objects. State is just variables. 
`$` is passed in for tracked operations. + +## Basic flow agent + +A flow agent has a typed `input` Zod schema and an optional `output` Zod schema, plus a handler function. The handler receives validated input and a `$` step builder for tracked operations. + +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; + +const myFlowAgent = flowAgent( + { + name: "data-processor", + input: z.object({ url: z.url() }), + output: z.object({ title: z.string(), wordCount: z.number() }), + }, + async ({ input, $ }) => { + const page = await $.step({ + id: "fetch-page", + execute: async () => { + const res = await fetch(input.url); + return await res.text(); + }, + }); + + if (!page.ok) throw new Error(page.error.message); + + return { + title: input.url, + wordCount: page.value.split(/\s+/).length, + }; + }, +); + +const result = await myFlowAgent.generate({ input: { url: "https://example.com" } }); +if (result.ok) { + console.log(result.output); // validated output + console.log(result.trace); // frozen execution trace tree + console.log(result.usage); // aggregated token usage from all $.agent() calls + console.log(result.duration); // wall-clock time in ms +} +``` + +## `$` operations + +Every `$` method is registered in the execution trace and returns a `StepResult`. Always check `.ok` before accessing `.value`. + +```ts +if (result.ok) { + console.log(result.value); // the step's return value + console.log(result.duration); // wall-clock time in ms +} else { + console.error(result.error.message); + console.error(result.error.stepId); +} +``` + +### `$.step` — single unit of work + +The fundamental tracked operation. The `execute` callback receives a nested `$` for further composition. + +```ts +const result = await $.step({ + id: "process-data", + execute: async ({ $ }) => { + const sub = await $.step({ + id: "sub-task", + execute: async () => computeResult(), + }); + return sub.ok ? 
sub.value : fallback; + }, +}); +``` + +### `$.agent` — tracked agent call + +Run an `agent()` as a tracked step. The framework records the agent name, input, output, and token usage in the trace. + +```ts +import { agent, flowAgent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const analyzer = agent({ + name: "analyzer", + model: openai("gpt-4.1"), + input: z.object({ text: z.string() }), + prompt: ({ input }) => `Analyze this text:\n\n${input.text}`, +}); + +const pipeline = flowAgent( + { + name: "analysis-pipeline", + input: z.object({ content: z.string() }), + output: z.object({ analysis: z.string() }), + }, + async ({ input, $ }) => { + const result = await $.agent({ + id: "analyze-content", + agent: analyzer, + input: { text: input.content }, + }); + + if (!result.ok) throw new Error(result.error.message); + + return { analysis: result.value.output }; + }, +); +``` + +### `$.map` — parallel processing + +Process an array of items concurrently. Results are returned in input order. Use `concurrency` to limit parallelism. + +```ts +const pages = await $.map({ + id: "fetch-pages", + input: urls, + concurrency: 3, + execute: async ({ item: url, index, $ }) => { + const res = await fetch(url); + return { url, status: res.status, body: await res.text() }; + }, +}); + +if (pages.ok) { + console.log(pages.value); // array of results in input order +} +``` + +### `$.all` — heterogeneous concurrent operations + +Runs multiple independent operations concurrently, like `Promise.all`. Entries are **factory functions** that receive an `AbortSignal` and return a promise — not pre-started promises. 
+ +```ts +const results = await $.all({ + id: "parallel-tasks", + entries: [ + (signal) => fetchMetadata(signal), + (signal) => fetchContent(signal), + (signal) => + $.step({ + id: "compute", + execute: async () => heavyComputation(), + }), + ], +}); + +if (results.ok) { + const [metadata, content, computed] = results.value; +} +``` + +### `$.race` — first-to-finish + +Runs multiple operations concurrently and returns the first to resolve. Losers are cancelled via abort signal. Entries follow the same factory function pattern as `$.all`. + +```ts +const fastest = await $.race({ + id: "fastest-source", + entries: [(signal) => fetchFromCDN(signal), (signal) => fetchFromOrigin(signal)], +}); + +if (fastest.ok) { + console.log(fastest.value); // result from whichever finished first +} +``` + +### Additional step types + +| Method | Description | +| ---------- | ------------------------------------------------------------------- | +| `$.each` | Sequential side effects. Returns `void`. | +| `$.reduce` | Sequential accumulation. Each step depends on the previous result. | +| `$.while` | Conditional loop. Runs while a condition holds. Returns last value. | + +```ts +// $.each — sequential side effects +await $.each({ + id: "notify-users", + input: users, + execute: async ({ item: user }) => { + await sendNotification(user.email, message); + }, +}); + +// $.reduce — sequential accumulation +const total = await $.reduce({ + id: "aggregate-scores", + input: items, + initial: 0, + execute: async ({ item, accumulator }) => { + return accumulator + item.score; + }, +}); + +// $.while — conditional loop +const converged = await $.while({ + id: "iterate-until-stable", + condition: ({ value, index }) => index < 10 && (value === undefined || value.delta > 0.01), + execute: async ({ index }) => { + return await computeIteration(index); + }, +}); +``` + +## Trace + +Every `$` operation produces a `TraceEntry`. 
Nested operations appear as `children`, forming a tree that represents the full execution graph. After execution completes, the trace is deep-cloned and frozen via `snapshotTrace()`. + +```ts +const result = await myFlowAgent.generate({ input }); + +if (result.ok) { + for (const entry of result.trace) { + const duration = (entry.finishedAt ?? 0) - entry.startedAt; + console.log(entry.id, entry.type, duration); + } +} +``` + +Walk the trace recursively to inspect nested operations: + +```ts +function walkTrace(entries: readonly TraceEntry[], depth = 0): void { + for (const entry of entries) { + const indent = " ".repeat(depth); + const duration = (entry.finishedAt ?? 0) - entry.startedAt; + console.log(`${indent}${entry.type}(${entry.id}) ${duration}ms`); + if (entry.children) { + walkTrace(entry.children, depth + 1); + } + } +} + +if (result.ok) { + walkTrace(result.trace); +} +``` + +Use `collectUsages()` to recursively extract all `TokenUsage` values from the trace tree: + +```ts +import { collectUsages } from "@funkai/agents"; + +if (result.ok) { + const usages = collectUsages(result.trace); + const totalTokens = usages.reduce((sum, u) => sum + u.inputTokens + u.outputTokens, 0); + console.log("Total tokens:", totalTokens); +} +``` + +See [Cost Tracking](cost-tracking.md) for full usage aggregation and cost calculation. + +## Typed I/O + +The `input` Zod schema is required on a flow agent. The `output` schema is optional — when provided, the handler must return a value that satisfies it (validation runs before the result is returned to the caller). When omitted, the handler returns `void` and the collected text from sub-agent responses becomes a `string` output. 
+ +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; + +const InputSchema = z.object({ + urls: z.array(z.url()), +}); + +const OutputSchema = z.object({ + summaries: z.array(z.object({ url: z.string(), summary: z.string() })), +}); + +const pipeline = flowAgent( + { + name: "summarize-pages", + input: InputSchema, + output: OutputSchema, + }, + async ({ input, $ }) => { + // handler must return OutputSchema-compatible value + return { summaries: [] }; + }, +); +``` + +## Streaming step progress + +Use `.stream()` to receive `StepEvent` objects as the flow agent executes. + +```ts +const result = await myFlowAgent.stream({ input: { url: "https://example.com" } }); + +if (result.ok) { + for await (const event of result.fullStream) { + switch (event.type) { + case "step:start": + console.log(`Step started: ${event.step.id}`); + break; + case "step:finish": + console.log(`Step finished: ${event.step.id} (${event.duration}ms)`); + break; + case "step:error": + console.error(`Step failed: ${event.step.id}`, event.error); + break; + case "flow:finish": + console.log(`Flow agent complete (${event.duration}ms)`); + break; + } + } + + // Final output resolves after stream completes + const output = await result.output; +} +``` + +## Hooks for observability + +```ts +const wf = flowAgent( + { + name: "observed-flow-agent", + input: InputSchema, + output: OutputSchema, + onStart: ({ input }) => console.log("Flow agent started"), + onFinish: ({ input, result, duration }) => console.log(`Done in ${duration}ms`), + onError: ({ input, error }) => console.error("Failed:", error.message), + onStepStart: ({ step }) => console.log(`Step ${step.id} started`), + onStepFinish: ({ step, result, duration }) => + console.log(`Step ${step.id} done in ${duration}ms`), + }, + handler, +); +``` + +## Export as a plain function + +Use `.fn()` for clean single-function exports. 
+ +```ts +export const processData = myFlowAgent.fn(); + +// Callers use it like a regular async function +const result = await processData({ input: { url: "https://example.com" } }); +``` + +## Full example + +```ts +import { agent, flowAgent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const summarizer = agent({ + name: "summarizer", + model: openai("gpt-4.1"), + input: z.object({ text: z.string() }), + prompt: ({ input }) => `Summarize:\n\n${input.text}`, +}); + +const pipeline = flowAgent( + { + name: "summarize-pages", + input: z.object({ urls: z.array(z.url()) }), + output: z.object({ + summaries: z.array(z.object({ url: z.string(), summary: z.string() })), + }), + }, + async ({ input, $ }) => { + // Fetch all pages in parallel + const pages = await $.map({ + id: "fetch-pages", + input: input.urls, + concurrency: 5, + execute: async ({ item: url }) => { + const res = await fetch(url); + return { url, body: await res.text() }; + }, + }); + + if (!pages.ok) throw new Error("Failed to fetch pages"); + + // Summarize each page with the agent + const summaries = await $.map({ + id: "summarize-pages", + input: pages.value, + concurrency: 3, + execute: async ({ item: page, $ }) => { + const result = await $.agent({ + id: `summarize-${page.url}`, + agent: summarizer, + input: { text: page.body }, + }); + if (!result.ok) throw new Error(`Failed to summarize ${page.url}`); + return { url: page.url, summary: result.value.output }; + }, + }); + + if (!summaries.ok) throw new Error("Failed to summarize"); + + return { summaries: summaries.value }; + }, +); + +export const summarizePages = pipeline.fn(); +``` + +--- + +## Reference: `flowAgent()` signature + +```ts +// With structured output +function flowAgent( + config: FlowAgentConfigWithOutput, + handler: FlowAgentHandler, +): FlowAgent; + +// Without output schema (void handler, string output) +function flowAgent( + config: FlowAgentConfigWithoutOutput, + handler: 
FlowAgentHandler, +): FlowAgent; +``` + +## Reference: FlowAgentConfig + +| Field | Required | Type | Description | +| -------------- | -------- | --------------------------------------------------------------- | --------------------------------------------------------- | +| `name` | Yes | `string` | Unique flow agent name (used in logs, traces) | +| `input` | Yes | `ZodType` | Zod schema for validating input | +| `output` | No | `ZodType` | Zod schema for validating output (omit for string output) | +| `agents` | No | `FlowSubAgents` | Record of agents available to `$.agent()` | +| `logger` | No | `Resolver` | Pino-compatible logger | +| `onStart` | No | `(event: { input }) => void \| Promise` | Hook: fires when the flow agent starts | +| `onFinish` | No | `(event: { input, result, duration }) => void \| Promise` | Hook: fires on success | +| `onError` | No | `(event: { input, error }) => void \| Promise` | Hook: fires on error | +| `onStepStart` | No | `(event: { step: StepInfo }) => void \| Promise` | Hook: fires when any `$` step starts | +| `onStepFinish` | No | `(event: { step, result, duration }) => void \| Promise` | Hook: fires when any `$` step finishes | + +## Reference: FlowAgentGenerateResult + +```ts +interface FlowAgentGenerateResult { + output: TOutput; // validated output + messages: Message[]; // full message history + finishReason: string; // why the model stopped + trace: readonly TraceEntry[]; // frozen execution trace tree + usage: TokenUsage; // aggregated token usage from all $.agent() calls + duration: number; // wall-clock time in ms +} +``` + +## Reference: TraceEntry + +```ts +interface TraceEntry { + id: string; + type: OperationType; + input?: unknown; + output?: unknown; + startedAt: number; + finishedAt?: number; + error?: Error; + usage?: TokenUsage; + children?: readonly TraceEntry[]; +} +``` + +| Field | Type | Description | +| ------------ | --------------- | ----------------------------------------------------------- | +| `id` | 
`string` | Unique id from the `$` config that produced this entry | +| `type` | `OperationType` | What kind of operation produced this entry | +| `input` | `unknown` | Input snapshot captured when the operation starts | +| `output` | `unknown` | Output snapshot captured on success | +| `startedAt` | `number` | Start time in Unix milliseconds | +| `finishedAt` | `number` | End time in Unix milliseconds (`undefined` while running) | +| `error` | `Error` | Error instance if the operation failed | +| `usage` | `TokenUsage` | Token usage (populated for successful `agent` type entries) | +| `children` | `TraceEntry[]` | Nested trace entries for child operations | + +### OperationType values + +| Value | Source | Description | +| ---------- | ------------ | ----------------------------------- | +| `"step"` | `$.step()` | Single unit of work | +| `"agent"` | `$.agent()` | Agent generation call | +| `"map"` | `$.map()` | Parallel map operation | +| `"each"` | `$.each()` | Sequential side effects | +| `"reduce"` | `$.reduce()` | Sequential accumulation | +| `"while"` | `$.while()` | Conditional loop | +| `"all"` | `$.all()` | Concurrent heterogeneous operations | +| `"race"` | `$.race()` | First-to-finish race | + +## Reference: StepEvent (stream) + +Events emitted on the flow agent stream: + +| Type | Fields | Description | +| ------------- | ---------------------------- | ------------------------- | +| `step:start` | `step: StepInfo` | A `$` operation started | +| `step:finish` | `step`, `result`, `duration` | A `$` operation completed | +| `step:error` | `step`, `error` | A `$` operation failed | +| `flow:finish` | `output`, `duration` | The flow agent completed | + +## Reference: FlowAgentOverrides + +| Field | Type | Description | +| -------- | ------------- | ----------------------------- | +| `signal` | `AbortSignal` | Abort signal for cancellation | + +When the signal fires, all in-flight `$` operations check `signal.aborted` and abort. 
The signal propagates through the entire execution tree. + +--- + +## Troubleshooting + +### Input validation failed + +Check that the input matches the `input` Zod schema. Ensure all required fields are present and types are correct. + +### Output validation failed + +Ensure the handler returns an object matching the `output` Zod schema. + +### Step result not checked + +All `$` methods return `StepResult` — check `.ok` before accessing `.value`. + +### `$.all`/`$.race` type error + +Entries must be factory functions `(signal) => Promise<T>`, not pre-started promises. + +--- + +## See also + +- [Create an Agent](create-agent.md) +- [Tools](tools.md) +- [Custom Flow Engine](custom-flow-engine.md) +- [Cost Tracking](cost-tracking.md) +- [Hooks](hooks.md) +- [Troubleshooting](troubleshooting.md) diff --git a/packages/agents/docs/custom-flow-engine.md b/packages/agents/docs/custom-flow-engine.md new file mode 100644 index 0000000..9cef1c1 --- /dev/null +++ b/packages/agents/docs/custom-flow-engine.md @@ -0,0 +1,302 @@ +# Custom Flow Engine + +`createFlowEngine()` builds a custom flow agent factory with additional step types merged into `$` and engine-level default hooks. Custom steps receive an `ExecutionContext` for cancellation and logging, and are fully typed on the handler's `$` parameter. + +The engine returns a `FlowFactory` — a function with the same signature as `flowAgent()` but with custom steps and hooks baked in. All flow agents created from the factory share the custom `$` methods and engine-level hooks.
+ +## Basic custom step + +```ts +import { createFlowEngine } from "@funkai/agents"; +import { z } from "zod"; + +const engine = createFlowEngine({ + $: { + fetch: async ({ ctx, config }) => { + const response = await fetch(config.url, { signal: ctx.signal }); + ctx.log.info("Fetched URL", { url: config.url }); + return response.json(); + }, + }, +}); + +const pipeline = engine( + { + name: "data-pipeline", + input: z.object({ endpoint: z.string() }), + output: z.object({ data: z.unknown() }), + }, + async ({ input, $ }) => { + // $.fetch is fully typed from the engine config + const data = await $.fetch({ url: input.endpoint }); + return { data }; + }, +); +``` + +## ExecutionContext in custom steps + +Custom step factories receive `ExecutionContext` through their `ctx` param. Use `ctx.signal` for cooperative cancellation and `ctx.log` for scoped logging. + +```ts +const engine = createFlowEngine({ + $: { + fetchWithLogging: async ({ ctx, config }) => { + const response = await fetch(config.url, { + signal: ctx.signal, + }); + ctx.log.info("Fetched URL", { url: config.url, status: response.status }); + return response.json(); + }, + }, +}); +``` + +Check the signal before long operations to support cooperative cancellation: + +```ts +const engine = createFlowEngine({ + $: { + batchProcess: async ({ ctx, config }) => { + const results = []; + for (const item of config.items) { + if (ctx.signal.aborted) { + ctx.log.warn("Batch processing cancelled"); + break; + } + results.push(await processItem(item)); + } + return results; + }, + }, +}); +``` + +The logger is scoped automatically. 
Log output includes execution context without manual threading: + +```text +flowAgentId: "content-pipeline" + stepId: "fetch-sources" + agentId: "researcher" +``` + +## Retry step + +```ts +const engine = createFlowEngine({ + $: { + retry: async ({ ctx, config }) => { + let lastError: Error | undefined; + for (let attempt = 0; attempt < config.attempts; attempt++) { + if (ctx.signal.aborted) throw new Error("Aborted"); + try { + return await config.execute({ attempt }); + } catch (err) { + lastError = err as Error; + ctx.log.warn("Retry attempt failed", { attempt, error: lastError.message }); + await sleep(config.backoff * (attempt + 1)); + } + } + throw lastError; + }, + }, +}); + +const flow = engine( + { + name: "resilient-flow", + input: z.object({ query: z.string() }), + output: z.object({ answer: z.string() }), + }, + async ({ input, $ }) => { + const result = await $.retry({ + attempts: 3, + backoff: 1000, + execute: async ({ attempt }) => { + const res = await $.agent({ + id: `generate-${attempt}`, + agent: writer, + input: input.query, + }); + if (!res.ok) throw new Error(res.error.message); + return res.value.output; + }, + }); + return { answer: result }; + }, +); +``` + +## Timeout step + +```ts +const engine = createFlowEngine({ + $: { + timeout: async ({ ctx, config }) => { + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), config.ms); + + // Propagate parent cancellation into the timeout controller + ctx.signal.addEventListener("abort", () => controller.abort()); + + try { + return await config.execute({ signal: controller.signal }); + } finally { + clearTimeout(timer); + } + }, + }, +}); +``` + +## Engine-level hooks + +Attach telemetry or logging at the engine level so all flow agents created from the factory share the same hooks. Engine hooks fire first, then flow agent-level hooks fire second. Each hook is independently error-swallowed so one failure does not prevent others from running. 
+ +```ts +const engine = createFlowEngine({ + onStart: ({ input }) => { + telemetry.trackStart(input); + }, + onFinish: ({ input, result, duration }) => { + telemetry.trackFinish({ input, duration }); + }, + onError: ({ error }) => { + errorReporter.capture(error); + }, + onStepStart: ({ step }) => { + telemetry.trackStepStart(step.id, step.type); + }, + onStepFinish: ({ step, duration }) => { + telemetry.trackStepFinish(step.id, duration); + }, +}); +``` + +## Combining custom steps and hooks + +```ts +const engine = createFlowEngine({ + $: { + retry: async ({ ctx, config }) => { + let lastError: Error | undefined; + for (let attempt = 0; attempt < config.attempts; attempt++) { + try { + return await config.execute({ attempt }); + } catch (err) { + lastError = err as Error; + ctx.log.warn("Retry failed", { attempt }); + } + } + throw lastError; + }, + validate: async ({ config }) => { + const parsed = config.schema.safeParse(config.data); + if (!parsed.success) throw new Error(parsed.error.message); + return parsed.data; + }, + }, + onStart: ({ input }) => metrics.increment("flow.started"), + onFinish: ({ duration }) => metrics.histogram("flow.duration", duration), +}); + +const myFlowAgent = engine( + { + name: "my-flow-agent", + input: MyInput, + output: MyOutput, + }, + async ({ input, $ }) => { + // Both $.retry and $.validate are typed + const data = await $.retry({ + attempts: 3, + execute: async () => fetchData(), + }); + const validated = await $.validate({ schema: DataSchema, data }); + return validated; + }, +); +``` + +--- + +## Reference: `createFlowEngine()` signature + +```ts +function createFlowEngine( + config: FlowEngineConfig, +): FlowFactory; +``` + +## Reference: FlowEngineConfig + +| Field | Type | Description | +| -------------- | ----------------------- | ------------------------------------------------ | +| `$` | `CustomStepDefinitions` | Custom step types to add to `$` | +| `onStart` | hook | Default hook: fires when any flow agent starts 
| +| `onFinish` | hook | Default hook: fires when any flow agent finishes | +| `onError` | hook | Default hook: fires when any flow agent errors | +| `onStepStart` | hook | Default hook: fires when any step starts | +| `onStepFinish` | hook | Default hook: fires when any step finishes | + +## Reference: CustomStepFactory + +The type for a custom step implementation: + +```ts +type CustomStepFactory = (params: { + ctx: ExecutionContext; + config: TConfig; +}) => Promise; +``` + +| Param | Type | Description | +| -------- | ------------------ | -------------------------------------------------- | +| `ctx` | `ExecutionContext` | Provides `signal` (AbortSignal) and `log` (Logger) | +| `config` | `TConfig` | The config object passed by the user at call site | + +## Reference: ExecutionContext + +The public context interface exposed to custom step factories: + +```ts +interface ExecutionContext { + readonly signal: AbortSignal; + readonly log: Logger; +} +``` + +| Field | Type | Description | +| -------- | ------------- | ----------------------------------------- | +| `signal` | `AbortSignal` | Abort signal for cooperative cancellation | +| `log` | `Logger` | Scoped logger with contextual bindings | + +### Signal propagation + +The abort signal cascades through the entire execution tree. When a flow agent receives a `signal` via overrides, it becomes the `signal` on the context. All nested `$` operations and sub-agent calls observe the same signal. + +```ts +const controller = new AbortController(); + +const result = await myFlowAgent.generate({ + input, + signal: controller.signal, +}); + +// Cancels all in-flight operations +controller.abort(); +``` + +## Reserved step names + +Custom steps cannot shadow built-in `StepBuilder` methods. 
The following names are reserved and will throw at engine creation time: + +`step`, `agent`, `map`, `each`, `reduce`, `while`, `all`, `race` + +--- + +## See also + +- [Create a Flow Agent](create-flow-agent.md) +- [Create an Agent](create-agent.md) +- [Hooks](hooks.md) diff --git a/packages/agents/docs/error-recovery.md b/packages/agents/docs/error-recovery.md new file mode 100644 index 0000000..07f2823 --- /dev/null +++ b/packages/agents/docs/error-recovery.md @@ -0,0 +1,348 @@ +# Error Recovery + +Patterns for building resilient agents and flow agents that recover gracefully from failures. + +## Prerequisites + +- `@funkai/agents` installed +- Familiarity with `flowAgent()`, `$.step`, `$.while`, `$.map`, and hooks +- Understanding of `StepResult` and `Result` types + +## Steps + +### 1. Use fallback values on step failure + +Every `$` method returns `StepResult` with an `ok` field. Check it before accessing `.value` and provide a fallback when the step fails. + +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; + +const resilient = flowAgent( + { + name: "resilient-fetch", + input: z.object({ url: z.string() }), + output: z.object({ body: z.string(), source: z.string() }), + }, + async ({ input, $ }) => { + const primary = await $.step({ + id: "fetch-primary", + execute: async () => { + const res = await fetch(input.url); + if (!res.ok) throw new Error(`HTTP ${res.status}`); + return await res.text(); + }, + }); + + if (primary.ok) { + return { body: primary.value, source: "primary" }; + } + + // Fallback to a cached or default value + const fallback = await $.step({ + id: "fetch-fallback", + execute: async () => { + const res = await fetch(`${input.url}?cached=true`); + return await res.text(); + }, + }); + + return { + body: fallback.ok ? fallback.value : "Service unavailable", + source: fallback.ok ? "fallback" : "default", + }; + }, +); +``` + +### 2. 
Retry with `$.while` + +Use `$.while` for retry logic with a bounded iteration count. + +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; + +const retryable = flowAgent( + { + name: "retry-fetch", + input: z.object({ url: z.string(), maxRetries: z.number().default(3) }), + output: z.object({ body: z.string(), attempts: z.number() }), + }, + async ({ input, $ }) => { + const result = await $.while({ + id: "retry-loop", + condition: ({ value, index }) => + index < input.maxRetries && (value === undefined || !value.ok), + execute: async ({ index }) => { + if (index > 0) { + await new Promise((resolve) => setTimeout(resolve, 1000 * index)); + } + try { + const res = await fetch(input.url); + if (!res.ok) throw new Error(`HTTP ${res.status}`); + return { ok: true as const, body: await res.text(), attempt: index + 1 }; + } catch { + return { ok: false as const, body: "", attempt: index + 1 }; + } + }, + }); + + const last = result.ok ? result.value : undefined; + return { + body: last?.ok ? last.body : "All retries failed", + attempts: last?.attempt ?? 0, + }; + }, +); +``` + +### 3. Handle partial success with `$.map` + +When processing multiple items, some may fail while others succeed. Check each item's result independently. 
+ +```ts +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const summarizer = agent({ + name: "summarizer", + model: openai("gpt-4.1"), + input: z.object({ text: z.string() }), + prompt: ({ input }) => `Summarize briefly:\n\n${input.text}`, +}); + +const batchSummarizer = flowAgent( + { + name: "batch-summarize", + input: z.object({ texts: z.array(z.string()) }), + output: z.object({ + results: z.array(z.object({ index: z.number(), summary: z.string(), ok: z.boolean() })), + }), + }, + async ({ input, $ }) => { + const summaries = await $.map({ + id: "summarize-all", + input: input.texts, + concurrency: 3, + execute: async ({ item, index, $ }) => { + const result = await $.agent({ + id: `summarize-${index}`, + agent: summarizer, + input: { text: item }, + }); + return { + index, + summary: result.ok ? result.value.output : "Failed to summarize", + ok: result.ok, + }; + }, + }); + + return { + results: summaries.ok + ? summaries.value + : input.texts.map((_, index) => ({ + index, + summary: "Batch processing failed", + ok: false, + })), + }; + }, +); +``` + +### 4. Build a circuit breaker with `$.reduce` + +Track consecutive failures and stop processing when a threshold is reached. 
+ +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; + +interface CircuitState { + readonly failures: number; + readonly results: readonly string[]; + readonly tripped: boolean; +} + +const circuitBreaker = flowAgent( + { + name: "circuit-breaker", + input: z.object({ urls: z.array(z.string()), maxFailures: z.number().default(3) }), + output: z.object({ results: z.array(z.string()), tripped: z.boolean() }), + }, + async ({ input, $ }) => { + const initial: CircuitState = { failures: 0, results: [], tripped: false }; + + const state = await $.reduce({ + id: "process-with-circuit", + input: input.urls, + initial, + execute: async ({ item: url, accumulator }) => { + if (accumulator.tripped) { + return { ...accumulator, results: [...accumulator.results, "skipped"] }; + } + try { + const res = await fetch(url); + if (!res.ok) throw new Error(`HTTP ${res.status}`); + const body = await res.text(); + return { failures: 0, results: [...accumulator.results, body], tripped: false }; + } catch { + const newFailures = accumulator.failures + 1; + return { + failures: newFailures, + results: [...accumulator.results, "error"], + tripped: newFailures >= input.maxFailures, + }; + } + }, + }); + + return { + results: state.ok ? [...state.value.results] : [], + tripped: state.ok ? state.value.tripped : true, + }; + }, +); +``` + +### 5. Log errors with hooks + +Use flow agent and step hooks to capture errors for logging and observability. 
+ +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; + +const observed = flowAgent( + { + name: "observed-pipeline", + input: z.object({ data: z.string() }), + output: z.object({ result: z.string() }), + onError: ({ input, error }) => { + console.error(`Flow agent failed for input: ${JSON.stringify(input)}`, error.message); + }, + onStepFinish: ({ step, result, duration }) => { + if (result === undefined) { + console.warn(`Step ${step.id} failed after ${duration}ms`); + } + }, + }, + async ({ input, $ }) => { + const processed = await $.step({ + id: "process", + onError: ({ id, error }) => { + console.error(`Step ${id} error:`, error.message); + }, + execute: async () => input.data.toUpperCase(), + }); + + return { result: processed.ok ? processed.value : "fallback" }; + }, +); +``` + +### 6. Combine patterns for robust pipelines + +Chain fallback, retry, and logging into a single flow agent. + +```ts +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const analyzer = agent({ + name: "analyzer", + model: openai("gpt-4.1"), + input: z.object({ content: z.string() }), + prompt: ({ input }) => `Analyze this content:\n\n${input.content}`, +}); + +const robust = flowAgent( + { + name: "robust-analysis", + input: z.object({ url: z.string() }), + output: z.object({ analysis: z.string(), source: z.string() }), + onStepFinish: ({ step, result, duration }) => { + const status = result !== undefined ? 
"ok" : "error"; + console.log(`[${step.id}] ${status} (${duration}ms)`); + }, + }, + async ({ input, $ }) => { + const content = await $.while({ + id: "fetch-retry", + condition: ({ value, index }) => index < 3 && (value === undefined || !value.ok), + execute: async ({ index }) => { + if (index > 0) { + await new Promise((resolve) => setTimeout(resolve, 1000 * index)); + } + try { + const res = await fetch(input.url); + if (!res.ok) throw new Error(`HTTP ${res.status}`); + return { ok: true as const, body: await res.text() }; + } catch { + return { ok: false as const, body: "" }; + } + }, + }); + + const fetchedBody = content.ok && content.value?.ok ? content.value.body : undefined; + + if (!fetchedBody) { + return { analysis: "Unable to fetch content", source: "none" }; + } + + const result = await $.agent({ + id: "analyze", + agent: analyzer, + input: { content: fetchedBody }, + }); + + return { + analysis: result.ok ? result.value.output : "Analysis unavailable", + source: result.ok ? "agent" : "fallback", + }; + }, +); +``` + +## Verification + +- Failing steps return `StepResult` with `ok: false` instead of throwing +- Retry loops terminate within the configured bounds +- Partial success flow agents return results for both succeeded and failed items +- Hook errors are caught, logged, and discarded — they never mask the original step error +- Circuit breaker skips remaining items after the failure threshold + +## Troubleshooting + +### Retry loop runs forever + +**Issue:** The `$.while` condition never becomes false. + +**Fix:** Always include an `index < maxRetries` guard in the condition. + +### Hook errors masking step errors + +**Issue:** Expected error information is missing. + +**Fix:** Hook errors are caught and discarded by design (via `attemptEachAsync`) so they never mask step errors. The original step error is always preserved in the `StepResult`. Check your logger output for hook error details. 
+ +### `$.map` fails on first error + +**Issue:** One item failure causes the entire `$.map` to fail. + +**Fix:** Catch errors inside the `execute` callback and return an error marker value instead of throwing. + +### Fallback step also fails + +**Issue:** Both primary and fallback steps fail, leaving no result. + +**Fix:** Always include a final default value that does not depend on external calls. + +## References + +- [`agent()` reference](/reference/agents/agent) +- [`flowAgent()` reference](/reference/agents/flow-agent) diff --git a/packages/agents/docs/guides/cost-aware-agents.md b/packages/agents/docs/guides/cost-aware-agents.md index 3676f5c..55d653c 100644 --- a/packages/agents/docs/guides/cost-aware-agents.md +++ b/packages/agents/docs/guides/cost-aware-agents.md @@ -1,12 +1,12 @@ # Build Cost-Aware Agents -Track token usage, calculate costs, enforce budgets, and optimize model selection for cost-efficient workflows. +Track token usage, calculate costs, enforce budgets, and optimize model selection for cost-efficient flow agents. ## Prerequisites - `@funkai/agents` installed - `@funkai/models` installed (provides `calculateCost`, `model`, `models`) -- Familiarity with `agent()`, `workflow()`, and hooks +- Familiarity with `agent()`, `flowAgent()`, and hooks ## Steps @@ -16,14 +16,15 @@ Every successful `agent.generate()` returns `result.usage` with resolved token c ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; const helper = agent({ name: "helper", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are a helpful assistant.", }); -const result = await helper.generate("What is TypeScript?"); +const result = await helper.generate({ prompt: "What is TypeScript?" }); if (result.ok) { console.log("Input tokens:", result.usage.inputTokens); @@ -41,15 +42,16 @@ Use `calculateCost()` to convert token counts into USD amounts. 
Look up model pr ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { calculateCost, model } from "@funkai/models"; const summarizer = agent({ name: "summarizer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You produce concise summaries.", }); -const result = await summarizer.generate("Summarize the history of TypeScript."); +const result = await summarizer.generate({ prompt: "Summarize the history of TypeScript." }); if (result.ok) { const modelDef = model("gpt-4.1"); @@ -69,9 +71,9 @@ Use the `onFinish` hook to track cumulative cost and abort when a budget is exce ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { calculateCost, model } from "@funkai/models"; -const modelId = "openai/gpt-4.1"; const modelDef = model("gpt-4.1"); let cumulativeCost = 0; @@ -79,7 +81,7 @@ const budgetLimit = 0.5; // $0.50 const helper = agent({ name: "budget-helper", - model: modelId, + model: openai("gpt-4.1"), system: "You are a helpful assistant.", onFinish: ({ result }) => { const cost = calculateCost(result.usage, modelDef.pricing); @@ -99,11 +101,13 @@ Use per-call overrides to select cheaper models for simple tasks and more capabl ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; +import type { LanguageModel } from "@funkai/models"; const assistant = agent({ name: "smart-assistant", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ question: z.string(), complexity: z.enum(["simple", "complex"]), @@ -111,8 +115,8 @@ const assistant = agent({ prompt: ({ input }) => input.question, }); -const selectModel = (complexity: "simple" | "complex"): string => - complexity === "simple" ? "openai/gpt-4.1-mini" : "openai/gpt-4.1"; +const selectModel = (complexity: "simple" | "complex"): LanguageModel => + complexity === "simple" ? 
openai("gpt-4.1-mini") : openai("gpt-4.1"); const result = await assistant.generate( { question: "What is 2 + 2?", complexity: "simple" }, @@ -120,23 +124,24 @@ const result = await assistant.generate( ); ``` -### 5. Aggregate workflow cost +### 5. Aggregate flow agent cost -Workflow results include `result.usage` with aggregated token counts from all `$.agent()` calls. Combine with `calculateCost()` for the total workflow cost. +Flow agent results include `result.usage` with aggregated token counts from all `$.agent()` calls. Combine with `calculateCost()` for the total flow agent cost. ```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { calculateCost, model } from "@funkai/models"; import { z } from "zod"; const analyzer = agent({ name: "analyzer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Analyze:\n\n${input.text}`, }); -const pipeline = workflow( +const pipeline = flowAgent( { name: "analysis-pipeline", input: z.object({ texts: z.array(z.string()) }), @@ -177,7 +182,7 @@ const result = await pipeline.generate({ texts: ["Text A", "Text B", "Text C"] } if (result.ok) { const modelDef = model("gpt-4.1"); const cost = calculateCost(result.usage, modelDef.pricing); - console.log(`Workflow total: ${result.usage.totalTokens} tokens, $${cost.total.toFixed(6)}`); + console.log(`Flow agent total: ${result.usage.totalTokens} tokens, $${cost.total.toFixed(6)}`); } ``` @@ -198,26 +203,26 @@ for (const m of sorted.slice(0, 5)) { } ``` -### 7. Log per-step costs in workflows +### 7. Log per-step costs in flow agents Use `onStepFinish` to calculate and log the cost of each agent step as it completes. 
```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { calculateCost, model } from "@funkai/models"; import { z } from "zod"; -const modelId = "openai/gpt-4.1"; const modelDef = model("gpt-4.1"); const writer = agent({ name: "writer", - model: modelId, + model: openai("gpt-4.1"), input: z.object({ topic: z.string() }), prompt: ({ input }) => `Write about: ${input.topic}`, }); -const traced = workflow( +const traced = flowAgent( { name: "cost-traced", input: z.object({ topics: z.array(z.string()) }), @@ -256,7 +261,7 @@ const traced = workflow( - `result.usage` contains non-negative token counts for all fields - `calculateCost()` returns a `UsageCost` with `input`, `output`, `cacheRead`, `cacheWrite`, and `total` fields - Budget hooks fire after each successful generation -- Workflow `result.usage` aggregates all `$.agent()` calls +- Flow agent `result.usage` aggregates all `$.agent()` calls - `model()` throws for unknown IDs; use `models()` to list available models ## Troubleshooting @@ -279,7 +284,7 @@ const traced = workflow( **Fix:** Hooks are observability callbacks -- they cannot abort execution. To enforce a hard budget, check the cumulative cost before each call and skip or abort manually using an `AbortController`. -### Workflow usage does not include non-agent steps +### Flow agent usage does not include non-agent steps **Issue:** `result.usage` only includes tokens from `$.agent()` calls. 
@@ -291,4 +296,4 @@ const traced = workflow( - [Models](../provider/models.md) - [Hooks](../core/hooks.md) - [Create an Agent](create-agent.md) -- [Create a Workflow](create-workflow.md) +- [Create a Flow Agent](create-flow-agent.md) diff --git a/packages/agents/docs/guides/create-agent.md b/packages/agents/docs/guides/create-agent.md index 95f44d2..1ab545c 100644 --- a/packages/agents/docs/guides/create-agent.md +++ b/packages/agents/docs/guides/create-agent.md @@ -14,14 +14,15 @@ Pass a `name`, `model`, and optional `system` prompt. In simple mode, `.generate ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; const helper = agent({ name: "helper", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are a helpful assistant.", }); -const result = await helper.generate("What is TypeScript?"); +const result = await helper.generate({ prompt: "What is TypeScript?" }); if (result.ok) { console.log(result.output); // string } @@ -33,11 +34,12 @@ Add an `input` Zod schema and a `prompt` function. Both are required together. ` ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const summarizer = agent({ name: "summarizer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string(), maxLength: z.number().optional(), @@ -59,6 +61,7 @@ Pass a `tools` record. Tool names come from the object keys. See [Create a Tool] ```ts import { agent, tool } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const fetchPage = tool({ @@ -72,7 +75,7 @@ const fetchPage = tool({ const researcher = agent({ name: "researcher", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You research topics by fetching web pages.", tools: { fetchPage }, }); @@ -85,14 +88,14 @@ Pass an `agents` record. Each subagent is auto-wrapped as a delegatable tool. 
Ab ```ts const writer = agent({ name: "writer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ topic: z.string() }), prompt: ({ input }) => `Write an article about ${input.topic}`, }); const editor = agent({ name: "editor", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You review and improve articles. Delegate writing to the writer agent.", agents: { writer }, }); @@ -104,11 +107,12 @@ Pass an `output` config to get typed structured output instead of a string. Acce ```ts import { Output } from "ai"; +import { openai } from "@ai-sdk/openai"; // Zod schema auto-wrapped as Output.object() const classifier = agent({ name: "classifier", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), output: z.object({ category: z.enum(["bug", "feature", "question"]), confidence: z.number(), @@ -120,7 +124,7 @@ const classifier = agent({ // Or use Output directly const tagger = agent({ name: "tagger", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), output: Output.array({ element: z.object({ tag: z.string(), score: z.number() }) }), system: "Extract tags from the text.", }); @@ -131,11 +135,11 @@ const tagger = agent({ Use `.stream()` for incremental text delivery. The result contains a `ReadableStream` for live chunks, plus `output` and `messages` as promises that resolve after the stream completes. 
```ts -const result = await helper.stream("Explain async/await in detail"); +const result = await helper.stream({ prompt: "Explain async/await in detail" }); if (result.ok) { // Consume text chunks as they arrive - const reader = result.stream.getReader(); + const reader = result.fullStream.getReader(); while (true) { const { done, value } = await reader.read(); if (done) break; @@ -169,7 +173,8 @@ const controller = new AbortController(); // Cancel after 10 seconds setTimeout(() => controller.abort(), 10_000); -const result = await helper.generate("Explain quantum computing", { +const result = await helper.generate({ + prompt: "Explain quantum computing", signal: controller.signal, }); @@ -183,8 +188,9 @@ if (!result.ok) { Override model, system prompt, tools, output, and hooks for a single call without changing the agent definition. ```ts -const result = await helper.generate("Explain monads", { - model: "anthropic/claude-sonnet-4-20250514", +const result = await helper.generate({ + prompt: "Explain monads", + model: anthropic("claude-sonnet-4-20250514"), system: "You explain concepts using simple analogies.", maxSteps: 5, onStart: ({ input }) => console.log("Starting with:", input), @@ -219,6 +225,6 @@ const result = await helper.generate("Explain monads", { ## References - [Create a Tool](create-tool.md) -- [Create a Workflow](create-workflow.md) +- [Create a Flow Agent](create-flow-agent.md) - [Provider Overview](../provider/overview.md) - [Troubleshooting](../troubleshooting.md) diff --git a/packages/agents/docs/guides/create-workflow.md b/packages/agents/docs/guides/create-flow-agent.md similarity index 87% rename from packages/agents/docs/guides/create-workflow.md rename to packages/agents/docs/guides/create-flow-agent.md index aff0eee..98222fe 100644 --- a/packages/agents/docs/guides/create-workflow.md +++ b/packages/agents/docs/guides/create-flow-agent.md @@ -1,4 +1,4 @@ -# Create a Workflow +# Create a Flow Agent ## Prerequisites @@ -8,15 +8,15 @@ ## 
Steps -### 1. Define a basic workflow +### 1. Define a basic flow agent -A workflow has typed `input` and `output` Zod schemas and a handler function. The handler receives validated input and a `$` step builder for tracked operations. +A flow agent has typed `input` and `output` Zod schemas and a handler function. The handler receives validated input and a `$` step builder for tracked operations. ```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; -const myWorkflow = workflow( +const myFlowAgent = flowAgent( { name: "data-processor", input: z.object({ url: z.url() }), @@ -73,19 +73,20 @@ if (result.ok) { ### 3. Use `$.agent` for agent calls -Run an agent as a tracked workflow step. The framework records the agent name, input, and output in the trace. +Run an agent as a tracked flow agent step. The framework records the agent name, input, and output in the trace. ```ts -import { agent } from "@funkai/agents"; +import { agent, flowAgent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; const analyzer = agent({ name: "analyzer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Analyze this text:\n\n${input.text}`, }); -const wf = workflow( +const wf = flowAgent( { name: "analysis-pipeline", input: z.object({ content: z.string() }), @@ -205,13 +206,13 @@ const converged = await $.while({ ### 8. Stream step progress events -Use `.stream()` to receive `StepEvent` objects as the workflow executes. +Use `.stream()` to receive `StepEvent` objects as the flow agent executes. 
```ts -const result = await myWorkflow.stream({ url: "https://example.com" }); +const result = await myFlowAgent.stream({ url: "https://example.com" }); if (result.ok) { - const reader = result.stream.getReader(); + const reader = result.fullStream.getReader(); while (true) { const { done, value: event } = await reader.read(); if (done) break; @@ -226,8 +227,8 @@ if (result.ok) { case "step:error": console.error(`Step failed: ${event.step.id}`, event.error); break; - case "workflow:finish": - console.log(`Workflow complete (${event.duration}ms)`); + case "flow:finish": + console.log(`Flow agent complete (${event.duration}ms)`); break; } } @@ -243,7 +244,7 @@ if (result.ok) { Use `.fn()` for clean single-function exports. ```ts -export const processData = myWorkflow.fn(); +export const processData = myFlowAgent.fn(); // Callers use it like a regular async function const result = await processData({ url: "https://example.com" }); @@ -252,12 +253,12 @@ const result = await processData({ url: "https://example.com" }); ### 10. 
Add hooks for observability ```ts -const wf = workflow( +const wf = flowAgent( { - name: "observed-workflow", + name: "observed-flow-agent", input: InputSchema, output: OutputSchema, - onStart: ({ input }) => console.log("Workflow started"), + onStart: ({ input }) => console.log("Flow agent started"), onFinish: ({ input, output, duration }) => console.log(`Done in ${duration}ms`), onError: ({ input, error }) => console.error("Failed:", error.message), onStepStart: ({ step }) => console.log(`Step ${step.id} started`), @@ -271,7 +272,8 @@ const wf = workflow( ## Full example ```ts -import { agent, workflow, tool } from "@funkai/agents"; +import { agent, flowAgent, tool } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; // Define tools @@ -287,13 +289,13 @@ const fetchPage = tool({ // Define agents const summarizer = agent({ name: "summarizer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Summarize:\n\n${input.text}`, }); -// Define workflow -const pipeline = workflow( +// Define flow agent +const pipeline = flowAgent( { name: "summarize-pages", input: z.object({ urls: z.array(z.url()) }), @@ -343,7 +345,7 @@ export const summarizePages = pipeline.fn(); ## Verification - `result.ok` is `true` on success -- `result.output` contains the validated workflow output +- `result.output` contains the validated flow agent output - `result.trace` contains the frozen execution trace - `result.usage` contains aggregated token usage from all `$.agent()` calls - `result.duration` contains total wall-clock time in milliseconds diff --git a/packages/agents/docs/guides/create-tool.md b/packages/agents/docs/guides/create-tool.md index c6284a8..364abc8 100644 --- a/packages/agents/docs/guides/create-tool.md +++ b/packages/agents/docs/guides/create-tool.md @@ -33,10 +33,11 @@ Pass tools as a record on the agent config. 
The tool's name comes from the objec ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; const researcher = agent({ name: "researcher", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You research topics by fetching web pages.", tools: { fetchPage }, }); @@ -133,5 +134,5 @@ const createFile = tool({ ## References - [Create an Agent](create-agent.md) -- [Create a Workflow](create-workflow.md) +- [Create a Flow Agent](create-flow-agent.md) - [Troubleshooting](../troubleshooting.md) diff --git a/packages/agents/docs/guides/error-recovery.md b/packages/agents/docs/guides/error-recovery.md index d23d3c3..5de5ed0 100644 --- a/packages/agents/docs/guides/error-recovery.md +++ b/packages/agents/docs/guides/error-recovery.md @@ -1,11 +1,11 @@ # Handle Error Recovery -Patterns for building resilient agents and workflows that recover gracefully from failures. +Patterns for building resilient agents and flow agents that recover gracefully from failures. ## Prerequisites - `@funkai/agents` installed -- Familiarity with `workflow()`, `$.step`, `$.while`, `$.map`, and hooks +- Familiarity with `flowAgent()`, `$.step`, `$.while`, `$.map`, and hooks - Understanding of `StepResult` and `Result` types ## Steps @@ -15,10 +15,10 @@ Patterns for building resilient agents and workflows that recover gracefully fro Every `$` method returns `StepResult` with an `ok` field. Check it before accessing `.value` and provide a fallback when the step fails. ```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; -const resilient = workflow( +const resilient = flowAgent( { name: "resilient-fetch", input: z.object({ url: z.string() }), @@ -60,10 +60,10 @@ const resilient = workflow( Use `$.while` for retry logic with a bounded iteration count. The condition receives the last value and iteration index. 
```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; -const retryable = workflow( +const retryable = flowAgent( { name: "retry-fetch", input: z.object({ url: z.string(), maxRetries: z.number().default(3) }), @@ -103,17 +103,18 @@ const retryable = workflow( When processing multiple items, some may fail while others succeed. Check each item's result independently rather than failing the entire batch. ```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const summarizer = agent({ name: "summarizer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Summarize briefly:\n\n${input.text}`, }); -const batchSummarizer = workflow( +const batchSummarizer = flowAgent( { name: "batch-summarize", input: z.object({ texts: z.array(z.string()) }), @@ -165,7 +166,7 @@ const batchSummarizer = workflow( Track consecutive failures and stop processing when a threshold is reached. ```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; interface CircuitState { @@ -174,7 +175,7 @@ interface CircuitState { readonly tripped: boolean; } -const circuitBreaker = workflow( +const circuitBreaker = flowAgent( { name: "circuit-breaker", input: z.object({ urls: z.array(z.string()), maxFailures: z.number().default(3) }), @@ -226,19 +227,19 @@ const circuitBreaker = workflow( ### 5. Log errors with hooks -Use workflow and step hooks to capture errors for logging and observability without interrupting the execution flow. +Use flow agent and step hooks to capture errors for logging and observability without interrupting the execution flow. 
```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; -const observed = workflow( +const observed = flowAgent( { name: "observed-pipeline", input: z.object({ data: z.string() }), output: z.object({ result: z.string() }), onError: ({ input, error }) => { - console.error(`Workflow failed for input: ${JSON.stringify(input)}`, error.message); + console.error(`Flow agent failed for input: ${JSON.stringify(input)}`, error.message); }, onStepFinish: ({ step, result, duration }) => { if (result === undefined) { @@ -265,20 +266,21 @@ const observed = workflow( ### 6. Combine patterns for robust pipelines -Chain fallback, retry, and logging into a single workflow. +Chain fallback, retry, and logging into a single flow agent. ```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const analyzer = agent({ name: "analyzer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ content: z.string() }), prompt: ({ input }) => `Analyze this content:\n\n${input.content}`, }); -const robust = workflow( +const robust = flowAgent( { name: "robust-analysis", input: z.object({ url: z.string() }), @@ -332,7 +334,7 @@ const robust = workflow( - Failing steps return `StepResult` with `ok: false` instead of throwing - Retry loops terminate within the configured bounds -- Partial success workflows return results for both succeeded and failed items +- Partial success flow agents return results for both succeeded and failed items - Hook errors are swallowed and never mask the original error - Circuit breaker skips remaining items after the failure threshold @@ -366,6 +368,6 @@ const robust = workflow( - [Step Builder ($)](../core/step.md) - [Hooks](../core/hooks.md) -- [Create a Workflow](create-workflow.md) +- [Create a Flow Agent](create-flow-agent.md) - [Core Overview](../core/overview.md) 
- [Troubleshooting](../troubleshooting.md) diff --git a/packages/agents/docs/guides/multi-agent-orchestration.md b/packages/agents/docs/guides/multi-agent-orchestration.md index 3dc6658..b0483dd 100644 --- a/packages/agents/docs/guides/multi-agent-orchestration.md +++ b/packages/agents/docs/guides/multi-agent-orchestration.md @@ -5,7 +5,7 @@ Patterns for coordinating multiple agents: sequential chains, parallel execution ## Prerequisites - `@funkai/agents` installed -- Familiarity with `agent()`, `workflow()`, `$.agent`, `$.map`, `$.all`, and `$.race` +- Familiarity with `agent()`, `flowAgent()`, `$.agent`, `$.map`, `$.all`, and `$.race` - Understanding of subagents (the `agents` field on `AgentConfig`) ## Steps @@ -15,19 +15,20 @@ Patterns for coordinating multiple agents: sequential chains, parallel execution Pass the output of one agent as input to the next using `$.agent` steps in sequence. ```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const researcher = agent({ name: "researcher", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ topic: z.string() }), prompt: ({ input }) => `Research the topic thoroughly:\n\n${input.topic}`, }); const writer = agent({ name: "writer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ research: z.string(), topic: z.string() }), prompt: ({ input }) => `Write an article about "${input.topic}" using this research:\n\n${input.research}`, @@ -35,12 +36,12 @@ const writer = agent({ const editor = agent({ name: "editor", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ draft: z.string() }), prompt: ({ input }) => `Edit this article for clarity and correctness:\n\n${input.draft}`, }); -const pipeline = workflow( +const pipeline = flowAgent( { name: "content-pipeline", input: z.object({ topic: z.string() }), @@ -77,17 +78,18 @@ const 
pipeline = workflow( Process multiple inputs concurrently with the same agent using `$.map`. ```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const translator = agent({ name: "translator", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string(), targetLang: z.string() }), prompt: ({ input }) => `Translate to ${input.targetLang}:\n\n${input.text}`, }); -const batchTranslate = workflow( +const batchTranslate = flowAgent( { name: "batch-translate", input: z.object({ @@ -126,31 +128,32 @@ const batchTranslate = workflow( When different agents need to run concurrently on different tasks, use `$.all`. ```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const sentimentAgent = agent({ name: "sentiment", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Analyze the sentiment of this text:\n\n${input.text}`, }); const summaryAgent = agent({ name: "summary", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Summarize this text:\n\n${input.text}`, }); const keywordAgent = agent({ name: "keywords", - model: "openai/gpt-4.1-mini", + model: openai("gpt-4.1-mini"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Extract keywords from this text:\n\n${input.text}`, }); -const analyze = workflow( +const analyze = flowAgent( { name: "parallel-analysis", input: z.object({ text: z.string() }), @@ -195,25 +198,26 @@ Declare agents in the `agents` field to let the parent delegate tasks via functi ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const codeWriter = agent({ name: "code-writer", - 
model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ spec: z.string() }), prompt: ({ input }) => `Write TypeScript code for this specification:\n\n${input.spec}`, }); const codeReviewer = agent({ name: "code-reviewer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ code: z.string() }), prompt: ({ input }) => `Review this TypeScript code for bugs and improvements:\n\n${input.code}`, }); const techLead = agent({ name: "tech-lead", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: `You are a tech lead. Break down tasks and delegate: - Use the code-writer agent to write code from specs. - Use the code-reviewer agent to review written code. @@ -222,7 +226,7 @@ Coordinate the work and provide the final result.`, }); // The tech lead decides when to call each subagent -const result = await techLead.generate("Build a rate limiter module"); +const result = await techLead.generate({ prompt: "Build a rate limiter module" }); ``` ### 5. Implement voting with multiple models @@ -230,7 +234,9 @@ const result = await techLead.generate("Build a rate limiter module"); Race or poll multiple models and select the most common answer. 
```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { anthropic } from "@ai-sdk/anthropic"; import { z } from "zod"; const OutputSchema = z.object({ @@ -240,7 +246,7 @@ const OutputSchema = z.object({ const classifierA = agent({ name: "classifier-a", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), output: OutputSchema, input: z.object({ text: z.string() }), prompt: ({ input }) => `Classify this issue:\n\n${input.text}`, @@ -248,7 +254,7 @@ const classifierA = agent({ const classifierB = agent({ name: "classifier-b", - model: "anthropic/claude-sonnet-4", + model: anthropic("claude-sonnet-4-20250514"), output: OutputSchema, input: z.object({ text: z.string() }), prompt: ({ input }) => `Classify this issue:\n\n${input.text}`, @@ -256,7 +262,7 @@ const classifierB = agent({ const classifierC = agent({ name: "classifier-c", - model: "openai/gpt-4.1-mini", + model: openai("gpt-4.1-mini"), output: OutputSchema, input: z.object({ text: z.string() }), prompt: ({ input }) => `Classify this issue:\n\n${input.text}`, @@ -280,7 +286,7 @@ const majorityVote = ( return best; }; -const voter = workflow( +const voter = flowAgent( { name: "voting-classifier", input: z.object({ text: z.string() }), @@ -319,22 +325,23 @@ const voter = workflow( Use `$.race` to get the first successful response from multiple providers or models. 
```ts -import { workflow, agent } from "@funkai/agents"; +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; const fastAgent = agent({ name: "fast", - model: "openai/gpt-4.1-mini", + model: openai("gpt-4.1-mini"), system: "Respond concisely.", }); const qualityAgent = agent({ name: "quality", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "Respond concisely.", }); -const racingWorkflow = workflow( +const racingFlowAgent = flowAgent( { name: "fastest-response", input: z.object({ question: z.string() }), @@ -344,8 +351,11 @@ const racingWorkflow = workflow( const result = await $.race({ id: "race-models", entries: [ - () => fastAgent.generate(input.question).then((r) => ({ ...r, model: "fast" })), - () => qualityAgent.generate(input.question).then((r) => ({ ...r, model: "quality" })), + () => fastAgent.generate({ prompt: input.question }).then((r) => ({ ...r, model: "fast" })), + () => + qualityAgent + .generate({ prompt: input.question }) + .then((r) => ({ ...r, model: "quality" })), ], }); @@ -364,23 +374,24 @@ const racingWorkflow = workflow( ### 7. Build hierarchical agent trees -Combine subagents with workflows for multi-level delegation. Each level can have its own subagents. +Combine subagents with flow agents for multi-level delegation. Each level can have its own subagents. 
```ts -import { agent, workflow } from "@funkai/agents"; +import { agent, flowAgent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; // Level 2: Specialist agents const dataCollector = agent({ name: "data-collector", - model: "openai/gpt-4.1-mini", + model: openai("gpt-4.1-mini"), input: z.object({ query: z.string() }), prompt: ({ input }) => `Find relevant data for: ${input.query}`, }); const dataAnalyst = agent({ name: "data-analyst", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ data: z.string() }), prompt: ({ input }) => `Analyze this data and provide insights:\n\n${input.data}`, }); @@ -388,20 +399,20 @@ const dataAnalyst = agent({ // Level 1: Team lead agents with subagents const researchLead = agent({ name: "research-lead", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You lead research. Use the data-collector to gather information.", agents: { dataCollector }, }); const analysisLead = agent({ name: "analysis-lead", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You lead analysis. 
Use the data-analyst to analyze data.", agents: { dataAnalyst }, }); -// Level 0: Top-level workflow -const project = workflow( +// Level 0: Top-level flow agent +const project = flowAgent( { name: "research-project", input: z.object({ question: z.string() }), @@ -476,6 +487,6 @@ const project = workflow( - [Agent](../core/agent.md) - [Step Builder ($)](../core/step.md) - [Create an Agent](create-agent.md) -- [Create a Workflow](create-workflow.md) +- [Create a Flow Agent](create-flow-agent.md) - [Hooks](../core/hooks.md) - [Troubleshooting](../troubleshooting.md) diff --git a/packages/agents/docs/guides/test-agents.md b/packages/agents/docs/guides/test-agents.md index 8e6a093..169d4ea 100644 --- a/packages/agents/docs/guides/test-agents.md +++ b/packages/agents/docs/guides/test-agents.md @@ -1,12 +1,12 @@ -# Test Agents and Workflows +# Test Agents and Flow Agents -Patterns for unit testing agents, workflows, and tools with mocked models and deterministic assertions. +Patterns for unit testing agents, flow agents, and tools with mocked models and deterministic assertions. ## Prerequisites - `@funkai/agents` installed - Vitest configured (`pnpm test --filter=@funkai/agents`) -- Familiarity with `agent()`, `workflow()`, and `tool()` APIs +- Familiarity with `agent()`, `flowAgent()`, and `tool()` APIs ## Steps @@ -16,13 +16,14 @@ Agents accept a `model` override on each `.generate()` call. This is useful for ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; import { describe, it, expect } from "vitest"; import { simulateReadableStream } from "ai"; const summarizer = agent({ name: "summarizer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), input: z.object({ text: z.string() }), prompt: ({ input }) => `Summarize:\n\n${input.text}`, }); @@ -53,21 +54,22 @@ describe("summarizer", () => { ### 2. Assert on Result shape -Every agent and workflow returns `Result`. 
Test both success and error paths by checking `result.ok`. +Every agent and flow agent returns `Result`. Test both success and error paths by checking `result.ok`. ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { describe, it, expect } from "vitest"; const helper = agent({ name: "helper", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are a helpful assistant.", }); describe("helper", () => { it("succeeds with a string output", async () => { - const result = await helper.generate("What is TypeScript?"); + const result = await helper.generate({ prompt: "What is TypeScript?" }); if (result.ok) { expect(result.output).toBeDefined(); @@ -80,7 +82,8 @@ describe("helper", () => { const controller = new AbortController(); controller.abort(); - const result = await helper.generate("This will be cancelled", { + const result = await helper.generate({ + prompt: "This will be cancelled", signal: controller.signal, }); @@ -98,12 +101,13 @@ When an agent has an `output` schema, assert on the typed shape of `result.outpu ```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; import { describe, it, expect } from "vitest"; const classifier = agent({ name: "classifier", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), output: z.object({ category: z.enum(["bug", "feature", "question"]), confidence: z.number(), @@ -156,16 +160,16 @@ describe("calculator tool", () => { }); ``` -### 5. Test workflow steps +### 5. Test flow agent steps -Workflows have typed input/output schemas. Test the full pipeline or individual steps by checking `result.ok`, `result.output`, and `result.trace`. +Flow agents have typed input/output schemas. Test the full pipeline or individual steps by checking `result.ok`, `result.output`, and `result.trace`. 
```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; import { describe, it, expect } from "vitest"; -const pipeline = workflow( +const pipeline = flowAgent( { name: "text-stats", input: z.object({ text: z.string() }), @@ -186,7 +190,7 @@ const pipeline = workflow( }, ); -describe("text-stats workflow", () => { +describe("text-stats flow agent", () => { it("computes word and character counts", async () => { const result = await pipeline.generate({ text: "hello world" }); @@ -211,11 +215,11 @@ describe("text-stats workflow", () => { Verify that failing steps produce `ok: false` with meaningful error codes. ```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; import { describe, it, expect } from "vitest"; -const failingWorkflow = workflow( +const failingFlowAgent = flowAgent( { name: "failing", input: z.object({ shouldFail: z.boolean() }), @@ -236,7 +240,7 @@ const failingWorkflow = workflow( describe("error paths", () => { it("handles step failure gracefully", async () => { - const result = await failingWorkflow.generate({ shouldFail: true }); + const result = await failingFlowAgent.generate({ shouldFail: true }); expect(result.ok).toBe(true); if (result.ok) { @@ -245,7 +249,7 @@ describe("error paths", () => { }); it("succeeds on happy path", async () => { - const result = await failingWorkflow.generate({ shouldFail: false }); + const result = await failingFlowAgent.generate({ shouldFail: false }); expect(result.ok).toBe(true); if (result.ok) { @@ -261,17 +265,18 @@ Verify that `result.usage` contains expected token counts after generation. 
```ts import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; import { describe, it, expect } from "vitest"; const helper = agent({ name: "helper", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "Reply with one word.", }); describe("usage tracking", () => { it("reports token usage on successful generation", async () => { - const result = await helper.generate("Say hello"); + const result = await helper.generate({ prompt: "Say hello" }); if (result.ok) { expect(result.usage.inputTokens).toBeGreaterThan(0); @@ -287,21 +292,21 @@ describe("usage tracking", () => { Capture lifecycle events with hooks to verify execution order and timing. ```ts -import { workflow } from "@funkai/agents"; +import { flowAgent } from "@funkai/agents"; import { z } from "zod"; import { describe, it, expect } from "vitest"; -describe("workflow hooks", () => { +describe("flow agent hooks", () => { it("fires hooks in correct order", async () => { const events: string[] = []; - const traced = workflow( + const traced = flowAgent( { name: "traced", input: z.object({ value: z.string() }), output: z.object({ result: z.string() }), onStart: () => { - events.push("workflow:start"); + events.push("flow:start"); }, onStepStart: ({ step }) => { events.push(`step:start:${step.id}`); @@ -310,7 +315,7 @@ describe("workflow hooks", () => { events.push(`step:finish:${step.id}`); }, onFinish: () => { - events.push("workflow:finish"); + events.push("flow:finish"); }, }, async ({ input, $ }) => { @@ -325,10 +330,10 @@ describe("workflow hooks", () => { await traced.generate({ value: "test" }); expect(events).toEqual([ - "workflow:start", + "flow:start", "step:start:process", "step:finish:process", - "workflow:finish", + "flow:finish", ]); }); }); @@ -364,7 +369,7 @@ describe("workflow hooks", () => { ## References - [Create an Agent](create-agent.md) -- [Create a Workflow](create-workflow.md) +- [Create a Flow Agent](create-flow-agent.md) - [Hooks](../core/hooks.md) - 
[Core Overview](../core/overview.md) - [Troubleshooting](../troubleshooting.md) diff --git a/packages/agents/docs/hooks.md b/packages/agents/docs/hooks.md new file mode 100644 index 0000000..9b94b16 --- /dev/null +++ b/packages/agents/docs/hooks.md @@ -0,0 +1,155 @@ +# Hooks + +Hooks provide lifecycle callbacks for agents, flow agents, and steps. All hooks are optional. Hook errors are swallowed (logged via `attemptEachAsync`, never thrown) so they never mask the original error or interrupt execution. + +## Agent Hooks + +Set on `AgentConfig`: + +| Hook | Event fields | When | +| -------------- | ----------------------------- | ---------------------------------------------------------------------------- | +| `onStart` | `{ input }` | Before the model is called | +| `onFinish` | `{ input, result, duration }` | After successful generation | +| `onError` | `{ input, error }` | On error, before Result is returned | +| `onStepFinish` | `{ stepId }` | After each tool-loop step (counter-based: `agentName:0`, `agentName:1`, ...) | + +## Flow Agent Hooks + +Set on `FlowAgentConfig`: + +| Hook | Event fields | When | +| -------------- | -------------------------------------- | ----------------------------------------------------- | +| `onStart` | `{ input }` | After input validation, before handler runs | +| `onFinish` | `{ input, result, duration }` | After successful completion | +| `onError` | `{ input, error }` | On error, before Result is returned | +| `onStepStart` | `{ step: StepInfo }` | Before any `$` operation executes | +| `onStepFinish` | `{ step: StepInfo, result, duration }` | After any `$` operation completes (success AND error) | + +`onStepFinish` fires on both success and error. On error, `result` is `undefined`. 
+ +## Step-Level Hooks + +Each `$` config accepts its own hooks: + +| Hook | Event fields | When | +| ---------- | -------------------------- | -------------------------- | +| `onStart` | `{ id }` | Before the step executes | +| `onFinish` | `{ id, result, duration }` | After successful execution | +| `onError` | `{ id, error }` | On error | + +These are available on `$.step`, `$.agent`, `$.map`, `$.each`, `$.reduce`, `$.while`, `$.all`, and `$.race`. + +## Per-Call Hooks + +Agent per-call hooks are set on the `GenerateParams` object passed to `.generate()` or `.stream()`. They have the same names as the base hooks but fire **after** the base hooks. + +```ts +await myAgent.generate({ + prompt: "hello", + onStart: ({ input }) => console.log("call-level start"), + onFinish: ({ result, duration }) => console.log(`call done in ${duration}ms`), +}); +``` + +## Hook Merging + +Per-call hooks merge with base hooks -- base fires first, then call-level. Both are independently wrapped with `attemptEachAsync`, so an error in one hook does not prevent the other from running. + +For flow engines created with `createFlowEngine()`, engine-level hooks fire first, then flow agent-level hooks fire second. + +## Hook Execution Order + +For a `$.agent` call inside a flow agent: + +``` +step.onStart -> flowAgent.onStepStart -> execute -> step.onFinish -> flowAgent.onStepFinish +``` + +On error, the sequence diverges: + +``` +step.onStart -> flowAgent.onStepStart -> execute (throws) -> step.onError -> flowAgent.onStepFinish +``` + +For an agent's tool-loop steps: + +``` +base.onStepFinish -> overrides.onStepFinish +``` + +The `stepId` for agent tool-loop steps is counter-based: `agentName:0`, `agentName:1`, etc. + +## Sub-Agent Hook Forwarding + +When a parent agent has sub-agents (via the `agents` config), those sub-agents are wrapped as tools. The parent forwards a subset of its hooks to each sub-agent so internal activity is observable. 
+ +### What gets forwarded (safe -- fixed event types) + +| Hook | Event type | Why safe | +| -------------- | ----------------- | -------------------------------------- | +| `onStepStart` | `StepInfo` | Fixed type, same shape for every agent | +| `onStepFinish` | `StepFinishEvent` | Fixed type, same shape for every agent | +| `logger` | `Logger` | No event type, just a logger instance | + +These hooks are passed directly into `child.generate()` as per-call hooks. The parent's `onStepFinish` is merged (config + per-call) before forwarding, so both the config-level and call-level hooks fire for sub-agent steps. + +### What stays at the parent (not forwarded -- generic event types) + +| Hook | Event type | Why not forwarded | +| ---------- | -------------------------------------------------------------- | ----------------------------------------- | +| `onStart` | `{ input: TInput }` | `TInput` differs between parent and child | +| `onFinish` | `{ input: TInput, result: GenerateResult, duration }` | Both `TInput` and `TOutput` differ | +| `onError` | `{ input: TInput, error: Error }` | `TInput` differs between parent and child | + +These hooks are parameterized by the agent's generic types (`TInput`, `TOutput`). A parent typed `Agent<{ userId: string }>` would have `onStart: (e: { input: { userId: string } }) => void`, but a sub-agent might expect `{ query: string }`. Forwarding the parent's hook to the child would cause the hook to receive the wrong event shape at runtime -- the compiler cannot catch this because the type boundary is erased when hooks cross agent boundaries. + +Sub-agent lifecycle activity is still observable at the parent level through `onStepFinish`, which fires for each tool-loop step including sub-agent tool calls and their results. 
+ +### Lifecycle diagram + +``` +Parent.generate({ input, onStepFinish }) + | + +-- Parent fires own onStart({ input }) <- parent's TInput, type-safe + | + +-- generateText() runs tool loop + | | + | +-- Step 0: LLM calls sub-agent tool + | | | + | | | Passed into child.generate(): + | | | logger -> parent's logger + | | | onStepStart -> parent's onStepStart (StepInfo -- fixed type) + | | | onStepFinish -> parent's merged onStepFinish (StepFinishEvent -- fixed type) + | | | + | | | NOT passed: + | | | onStart, onFinish, onError (generic types -- would break type safety) + | | | + | | +-- Child fires own onStart({ input }) <- child's TInput, type-safe + | | +-- Child runs tool loop + | | | +-- Child step 0 -> parent's onStepFinish fires (StepFinishEvent) + | | | +-- Child step 1 -> parent's onStepFinish fires (StepFinishEvent) + | | +-- Child fires own onFinish(...) <- child's types, type-safe + | | +-- Returns result to parent + | | + | +-- Parent's onStepFinish fires for step 0 (includes sub-agent tool result) + | + +-- Parent fires own onFinish({ input, result }) <- parent's TInput/TOutput, type-safe + | + +-- Returns Result to caller +``` + +## Error Handling + +All hooks are executed via `attemptEachAsync`, which: + +1. Runs each hook sequentially. +2. Catches and swallows any errors -- hook failures never propagate. +3. Skips `undefined` hooks (no null checks needed at call sites). + +This means a failing hook will never mask the original error or prevent other hooks from running. + +## References + +- [Create an Agent](create-agent.md) +- [Create a Flow Agent](create-flow-agent.md) +- [Step Builder ($)](step-builder.md) diff --git a/packages/agents/docs/middleware.md b/packages/agents/docs/middleware.md new file mode 100644 index 0000000..c67277f --- /dev/null +++ b/packages/agents/docs/middleware.md @@ -0,0 +1,170 @@ +# Middleware + +Middleware wraps language models with additional behavior -- logging, caching, rate limiting, or devtools integration. 
The `withModelMiddleware()` function applies middleware using the AI SDK's `wrapLanguageModel()` under the hood. + +## Architecture + +```mermaid +%%{init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#313244', + 'primaryTextColor': '#cdd6f4', + 'primaryBorderColor': '#6c7086', + 'lineColor': '#89b4fa', + 'secondaryColor': '#45475a', + 'tertiaryColor': '#1e1e2e', + 'background': '#1e1e2e', + 'mainBkg': '#313244', + 'clusterBkg': '#1e1e2e', + 'clusterBorder': '#45475a' + }, + 'flowchart': { 'curve': 'basis', 'padding': 15 } +}}%% +flowchart LR + A(["Agent"]) --> B(["withModelMiddleware()"]) + B --> C(["Custom MW 1"]) + C --> D(["Custom MW 2"]) + D --> E(["Devtools MW"]) + E --> F(["Language Model"]) + + classDef core fill:#313244,stroke:#89b4fa,stroke-width:2px,color:#cdd6f4 + classDef gateway fill:#313244,stroke:#fab387,stroke-width:2px,color:#cdd6f4 + classDef agent fill:#313244,stroke:#a6e3a1,stroke-width:2px,color:#cdd6f4 + + class A agent + class B,C,D core + class E gateway + class F agent +``` + +Middleware runs in array order -- the first entry wraps outermost, meaning it intercepts calls first and sees responses last. + +## Key Concepts + +### withModelMiddleware() + +Wraps a language model with one or more `LanguageModelMiddleware` layers. In development (`NODE_ENV === 'development'`), the AI SDK devtools middleware is appended automatically. 
+ +```ts +const wrappedModel = await withModelMiddleware({ + model: baseModel, + middleware: [loggingMiddleware, cachingMiddleware], +}); +``` + +### WrapModelOptions + +| Field | Type | Default | Description | +| ------------ | --------------------------- | ---------------------------------- | --------------------------------------------- | +| `model` | `LanguageModel` | -- | The base language model to wrap | +| `middleware` | `LanguageModelMiddleware[]` | `[]` | Custom middleware applied before defaults | +| `devtools` | `boolean` | `true` when `NODE_ENV=development` | Whether to include AI SDK devtools middleware | + +### Middleware Ordering + +Custom middleware runs **before** default middleware. Within the custom array, the first entry wraps outermost: + +``` +Request flow: Custom MW 1 -> Custom MW 2 -> Devtools -> Model +Response flow: Model -> Devtools -> Custom MW 2 -> Custom MW 1 +``` + +### Devtools + +The AI SDK devtools middleware (`@ai-sdk/devtools`) is included automatically in development. Control this behavior explicitly: + +```ts +// Force-enable in production +const model = await withModelMiddleware({ + model: baseModel, + devtools: true, +}); + +// Force-disable in development +const model = await withModelMiddleware({ + model: baseModel, + devtools: false, +}); +``` + +When no middleware is configured and devtools is disabled, the original model is returned unchanged -- no wrapping overhead. 
+ +## Usage + +### Basic Middleware + +```ts +import { type LanguageModelMiddleware } from "ai"; + +const loggingMiddleware: LanguageModelMiddleware = { + wrapGenerate: async ({ doGenerate, params }) => { + console.log("generate called", params.prompt); + const result = await doGenerate(); + console.log("generate finished", result.text); + return result; + }, +}; + +const model = await withModelMiddleware({ + model: baseModel, + middleware: [loggingMiddleware], + devtools: false, +}); +``` + +### Composing Multiple Middleware + +```ts +const rateLimiter: LanguageModelMiddleware = { + wrapGenerate: async ({ doGenerate }) => { + await acquireToken(); + return doGenerate(); + }, +}; + +const cacheMiddleware: LanguageModelMiddleware = { + wrapGenerate: async ({ doGenerate, params }) => { + const cached = await cache.get(params.prompt); + if (cached) return cached; + const result = await doGenerate(); + await cache.set(params.prompt, result); + return result; + }, +}; + +const model = await withModelMiddleware({ + model: baseModel, + middleware: [rateLimiter, cacheMiddleware], + devtools: false, +}); +``` + +Rate limiting wraps outermost, so it fires before the cache check. The cache middleware wraps the actual model call. 
+ +### Using with Agents + +Middleware is applied at the model level, before passing the model to an agent: + +```ts +const wrappedModel = await withModelMiddleware({ + model: baseModel, + middleware: [loggingMiddleware], +}); + +const myAgent = agent({ + name: "assistant", + model: wrappedModel, + system: "You are helpful.", +}); +``` + +## Resources + +- [AI SDK Middleware](https://ai-sdk.dev/docs/ai-sdk-core/middleware) +- [AI SDK Devtools](https://ai-sdk.dev/docs/ai-sdk-ui/devtools) + +## References + +- [Overview](overview.md) +- [Create an Agent](create-agent.md) diff --git a/packages/agents/docs/multi-agent-orchestration.md b/packages/agents/docs/multi-agent-orchestration.md new file mode 100644 index 0000000..e823355 --- /dev/null +++ b/packages/agents/docs/multi-agent-orchestration.md @@ -0,0 +1,429 @@ +# Orchestrate Multiple Agents + +Patterns for coordinating multiple agents: sequential chains, parallel execution, agent handoff, voting, and hierarchical delegation. + +## Prerequisites + +- `@funkai/agents` installed +- Familiarity with `agent()`, `flowAgent()`, `$.agent`, `$.map`, `$.all`, and `$.race` +- Understanding of subagents (the `agents` field on `AgentConfig`) + +## Steps + +### 1. Chain agents sequentially + +Pass the output of one agent as input to the next using `$.agent` steps in sequence. 
+ +```ts +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const researcher = agent({ + name: "researcher", + model: openai("gpt-4.1"), + input: z.object({ topic: z.string() }), + prompt: ({ input }) => `Research the topic thoroughly:\n\n${input.topic}`, +}); + +const writer = agent({ + name: "writer", + model: openai("gpt-4.1"), + input: z.object({ research: z.string(), topic: z.string() }), + prompt: ({ input }) => + `Write an article about "${input.topic}" using this research:\n\n${input.research}`, +}); + +const editor = agent({ + name: "editor", + model: openai("gpt-4.1"), + input: z.object({ draft: z.string() }), + prompt: ({ input }) => `Edit this article for clarity and correctness:\n\n${input.draft}`, +}); + +const pipeline = flowAgent( + { + name: "content-pipeline", + input: z.object({ topic: z.string() }), + output: z.object({ article: z.string() }), + }, + async ({ input, $ }) => { + const research = await $.agent({ + id: "research", + agent: researcher, + input: { topic: input.topic }, + }); + if (!research.ok) return { article: "Research failed" }; + + const draft = await $.agent({ + id: "write", + agent: writer, + input: { research: research.value.output, topic: input.topic }, + }); + if (!draft.ok) return { article: "Writing failed" }; + + const edited = await $.agent({ + id: "edit", + agent: editor, + input: { draft: draft.value.output }, + }); + + return { article: edited.ok ? edited.value.output : draft.value.output }; + }, +); +``` + +### 2. Run agents in parallel with `$.map` + +Process multiple inputs concurrently with the same agent using `$.map`. 
+ +```ts +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const translator = agent({ + name: "translator", + model: openai("gpt-4.1"), + input: z.object({ text: z.string(), targetLang: z.string() }), + prompt: ({ input }) => `Translate to ${input.targetLang}:\n\n${input.text}`, +}); + +const batchTranslate = flowAgent( + { + name: "batch-translate", + input: z.object({ text: z.string(), languages: z.array(z.string()) }), + output: z.object({ + translations: z.array(z.object({ language: z.string(), text: z.string() })), + }), + }, + async ({ input, $ }) => { + const results = await $.map({ + id: "translate-all", + input: input.languages, + concurrency: 5, + execute: async ({ item: language, $: step$ }) => { + const result = await step$.agent({ + id: `translate-${language}`, + agent: translator, + input: { text: input.text, targetLang: language }, + }); + return { + language, + text: result.ok ? result.value.output : `Translation to ${language} failed`, + }; + }, + }); + + return { translations: results.ok ? results.value : [] }; + }, +); +``` + +### 3. Run heterogeneous agents in parallel with `$.all` + +When different agents need to run concurrently on different tasks, use `$.all`. 
+
+```ts
+import { flowAgent, agent } from "@funkai/agents";
+import { openai } from "@ai-sdk/openai";
+import { z } from "zod";
+
+const sentimentAgent = agent({
+  name: "sentiment",
+  model: openai("gpt-4.1"),
+  input: z.object({ text: z.string() }),
+  prompt: ({ input }) => `Analyze the sentiment of this text:\n\n${input.text}`,
+});
+
+const summaryAgent = agent({
+  name: "summary",
+  model: openai("gpt-4.1"),
+  input: z.object({ text: z.string() }),
+  prompt: ({ input }) => `Summarize this text:\n\n${input.text}`,
+});
+
+const analyze = flowAgent(
+  {
+    name: "parallel-analysis",
+    input: z.object({ text: z.string() }),
+    output: z.object({ sentiment: z.string(), summary: z.string() }),
+  },
+  async ({ input, $ }) => {
+    const results = await $.all({
+      id: "analyze-parallel",
+      entries: [
+        (signal) => sentimentAgent.generate({ input: { text: input.text }, signal }),
+        (signal) => summaryAgent.generate({ input: { text: input.text }, signal }),
+      ],
+    });
+
+    if (!results.ok) {
+      return { sentiment: "unknown", summary: "unavailable" };
+    }
+
+    const [sentiment, summary] = results.value as readonly [
+      Awaited<ReturnType<typeof sentimentAgent.generate>>,
+      Awaited<ReturnType<typeof summaryAgent.generate>>,
+    ];
+
+    return {
+      sentiment: sentiment.ok ? sentiment.output : "unknown",
+      summary: summary.ok ? summary.output : "unavailable",
+    };
+  },
+);
+```
+
+### 4. Use subagents for agent handoff
+
+Declare agents in the `agents` field to let the parent delegate tasks via function calling.
+ +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const codeWriter = agent({ + name: "code-writer", + model: openai("gpt-4.1"), + input: z.object({ spec: z.string() }), + prompt: ({ input }) => `Write TypeScript code for this specification:\n\n${input.spec}`, +}); + +const codeReviewer = agent({ + name: "code-reviewer", + model: openai("gpt-4.1"), + input: z.object({ code: z.string() }), + prompt: ({ input }) => `Review this TypeScript code for bugs and improvements:\n\n${input.code}`, +}); + +const techLead = agent({ + name: "tech-lead", + model: openai("gpt-4.1"), + system: `You are a tech lead. Break down tasks and delegate: +- Use the code-writer agent to write code from specs. +- Use the code-reviewer agent to review written code. +Coordinate the work and provide the final result.`, + agents: { codeWriter, codeReviewer }, +}); + +const result = await techLead.generate({ prompt: "Build a rate limiter module" }); +``` + +### 5. Implement voting with multiple models + +Race or poll multiple models and select the most common answer. 
+ +```ts +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { anthropic } from "@ai-sdk/anthropic"; +import { z } from "zod"; + +const OutputSchema = z.object({ + category: z.enum(["bug", "feature", "question"]), + confidence: z.number(), +}); + +const classifierA = agent({ + name: "classifier-a", + model: openai("gpt-4.1"), + output: OutputSchema, + input: z.object({ text: z.string() }), + prompt: ({ input }) => `Classify this issue:\n\n${input.text}`, +}); + +const classifierB = agent({ + name: "classifier-b", + model: anthropic("claude-sonnet-4-20250514"), + output: OutputSchema, + input: z.object({ text: z.string() }), + prompt: ({ input }) => `Classify this issue:\n\n${input.text}`, +}); + +const voter = flowAgent( + { + name: "voting-classifier", + input: z.object({ text: z.string() }), + output: z.object({ category: z.string(), votes: z.number() }), + }, + async ({ input, $ }) => { + const results = await $.map({ + id: "collect-votes", + input: [classifierA, classifierB], + concurrency: 2, + execute: async ({ item: classifier, index, $ }) => { + const result = await $.agent({ + id: `vote-${index}`, + agent: classifier, + input: { text: input.text }, + }); + return result.ok ? result.value.output : { category: "unknown" as const, confidence: 0 }; + }, + }); + + const votes = results.ok ? results.value : []; + const validVotes = votes.filter((v) => v.category !== "unknown"); + + const counts = new Map(); + for (const vote of validVotes) { + counts.set(vote.category, (counts.get(vote.category) ?? 0) + 1); + } + let best = "unknown"; + let bestCount = 0; + for (const [category, count] of counts) { + if (count > bestCount) { + best = category; + bestCount = count; + } + } + + return { category: best, votes: validVotes.length }; + }, +); +``` + +### 6. Race agents for fastest response + +Use `$.race` to get the first successful response from multiple providers. 
+ +```ts +import { flowAgent, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const fastAgent = agent({ + name: "fast", + model: openai("gpt-4.1-mini"), + system: "Respond concisely.", +}); + +const qualityAgent = agent({ + name: "quality", + model: openai("gpt-4.1"), + system: "Respond concisely.", +}); + +const racingFlowAgent = flowAgent( + { + name: "fastest-response", + input: z.object({ question: z.string() }), + output: z.object({ answer: z.string(), winner: z.string() }), + }, + async ({ input, $ }) => { + const result = await $.race({ + id: "race-models", + entries: [ + (signal) => + fastAgent + .generate({ prompt: input.question, signal }) + .then((r) => ({ ...r, model: "fast" })), + (signal) => + qualityAgent + .generate({ prompt: input.question, signal }) + .then((r) => ({ ...r, model: "quality" })), + ], + }); + + if (!result.ok) { + return { answer: "No response available", winner: "none" }; + } + + const winner = result.value as { ok: boolean; output: string; model: string }; + return { + answer: winner.ok ? winner.output : "Response failed", + winner: winner.model, + }; + }, +); +``` + +### 7. Build hierarchical agent trees + +Combine subagents with flow agents for multi-level delegation. + +```ts +import { agent, flowAgent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const dataCollector = agent({ + name: "data-collector", + model: openai("gpt-4.1-mini"), + input: z.object({ query: z.string() }), + prompt: ({ input }) => `Find relevant data for: ${input.query}`, +}); + +const researchLead = agent({ + name: "research-lead", + model: openai("gpt-4.1"), + system: "You lead research. 
Use the data-collector to gather information.", + agents: { dataCollector }, +}); + +const project = flowAgent( + { + name: "research-project", + input: z.object({ question: z.string() }), + output: z.object({ findings: z.string() }), + }, + async ({ input, $ }) => { + const research = await $.agent({ + id: "research-phase", + agent: researchLead, + input: `Research this question: ${input.question}`, + }); + + return { + findings: research.ok ? research.value.output : "Research unavailable", + }; + }, +); +``` + +## Verification + +- Sequential chains pass output from one agent to the next +- `$.map` processes items concurrently up to the `concurrency` limit +- `$.all` runs heterogeneous agents concurrently and returns results in entry order +- Subagents appear as callable tools in the parent agent's context +- `$.race` returns the first result and cancels remaining entries +- Abort signals propagate from parent to child agents + +## Troubleshooting + +### Subagent never gets called + +**Issue:** The parent agent does not invoke the subagent during its tool loop. + +**Fix:** Improve the parent's `system` prompt to explicitly mention when to use the subagent. + +### `$.all` returns wrong types + +**Issue:** The `results.value` array has `unknown[]` type. + +**Fix:** Cast the destructured values to the expected types. + +### Race does not cancel losers + +**Issue:** Losing entries continue executing after the winner resolves. + +**Fix:** Entries must accept and respect the `AbortSignal` passed to their factory function. + +### Agent handoff loses context + +**Issue:** The subagent does not have access to the parent's conversation history. + +**Fix:** Subagents start with a fresh context. Pass relevant information explicitly in the input. + +### Parallel agents hit rate limits + +**Issue:** Running too many concurrent agent calls triggers provider rate limits. + +**Fix:** Use the `concurrency` parameter on `$.map` to limit parallelism. 
+ +## References + +- [`agent()` reference](/reference/agents/agent) +- [`flowAgent()` reference](/reference/agents/flow-agent) diff --git a/packages/agents/docs/output-strategies.md b/packages/agents/docs/output-strategies.md new file mode 100644 index 0000000..c43eb68 --- /dev/null +++ b/packages/agents/docs/output-strategies.md @@ -0,0 +1,228 @@ +# Output Strategies + +Output strategies control the shape and validation of agent generation output. The `output` field on `AgentConfig` accepts either an AI SDK `Output` strategy or a raw Zod schema, which the framework auto-wraps. + +## Architecture + +```mermaid +%%{init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#313244', + 'primaryTextColor': '#cdd6f4', + 'primaryBorderColor': '#6c7086', + 'lineColor': '#89b4fa', + 'secondaryColor': '#45475a', + 'tertiaryColor': '#1e1e2e', + 'background': '#1e1e2e', + 'mainBkg': '#313244', + 'clusterBkg': '#1e1e2e', + 'clusterBorder': '#45475a' + }, + 'flowchart': { 'curve': 'basis', 'padding': 15 } +}}%% +flowchart LR + A{"OutputParam"} -- "OutputSpec" --> B(["Use as-is"]) + A -- "ZodType" --> C{"isZodArray?"} + C -- "Yes" --> D(["Output.array()"]) + C -- "No" --> E(["Output.object()"]) + + classDef core fill:#313244,stroke:#89b4fa,stroke-width:2px,color:#cdd6f4 + classDef gateway fill:#313244,stroke:#fab387,stroke-width:2px,color:#cdd6f4 + classDef agent fill:#313244,stroke:#a6e3a1,stroke-width:2px,color:#cdd6f4 + + class A gateway + class B,D,E agent + class C core +``` + +## Key Concepts + +### OutputParam + +The accepted type for the `output` config field: + +```ts +type OutputParam = OutputSpec | ZodType; +``` + +When a raw `ZodType` is passed, the framework resolves it via `resolveOutput()`: + +- `z.array(...)` becomes `Output.array({ element: innerSchema })` +- Anything else becomes `Output.object({ schema })` + +### Available Strategies + +| Strategy | Output Type | Description | +| ---------------------------- | ---------------- | 
------------------------------------------ | +| `Output.text()` | `string` | Plain text (default when `output` omitted) | +| `Output.object({ schema })` | Schema type `T` | Validated structured object | +| `Output.array({ element })` | `T[]` | Validated array of elements | +| `Output.choice({ options })` | Union of options | Enum/classification | +| `z.object({ ... })` | Schema type `T` | Auto-wrapped as `Output.object()` | +| `z.array(z.object({ ... }))` | `T[]` | Auto-wrapped as `Output.array()` | + +### Resolution Logic + +The `resolveOutput()` function distinguishes between `OutputSpec` and `ZodType` by checking for the presence of `parseCompleteOutput` -- a method that exists on AI SDK `Output` instances but not on Zod schemas. + +## Usage + +### Output.text() (Default) + +When `output` is omitted, agents produce plain string output: + +```ts +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are helpful.", +}); + +const result = await helper.generate({ prompt: "What is TypeScript?" }); +if (result.ok) { + console.log(result.output); // string +} +``` + +### Output.object() + +Produce a validated structured object: + +```ts +import { Output } from "ai"; +import { openai } from "@ai-sdk/openai"; + +const analyzer = agent({ + name: "analyzer", + model: openai("gpt-4.1"), + system: "Analyze the sentiment of the given text.", + output: Output.object({ + schema: z.object({ + sentiment: z.enum(["positive", "negative", "neutral"]), + confidence: z.number().min(0).max(1), + reasoning: z.string(), + }), + }), +}); + +const result = await analyzer.generate({ prompt: "I love this product!" 
}); +if (result.ok) { + console.log(result.output.sentiment); // "positive" + console.log(result.output.confidence); // 0.95 +} +``` + +### Output.array() + +Produce a validated array of structured elements: + +```ts +import { Output } from "ai"; +import { openai } from "@ai-sdk/openai"; + +const extractor = agent({ + name: "extractor", + model: openai("gpt-4.1"), + system: "Extract all entities from the text.", + output: Output.array({ + element: z.object({ + name: z.string(), + type: z.enum(["person", "organization", "location"]), + }), + }), +}); + +const result = await extractor.generate({ prompt: "Alice works at Acme Corp in New York." }); +if (result.ok) { + for (const entity of result.output) { + console.log(entity.name, entity.type); + } +} +``` + +### Output.choice() + +Classify input into one of a set of options: + +```ts +import { Output } from "ai"; +import { openai } from "@ai-sdk/openai"; + +const classifier = agent({ + name: "classifier", + model: openai("gpt-4.1"), + system: "Classify the support ticket priority.", + output: Output.choice({ + options: ["low", "medium", "high", "critical"] as const, + }), +}); + +const result = await classifier.generate({ prompt: "Server is completely down" }); +if (result.ok) { + console.log(result.output); // "critical" +} +``` + +### Zod Schema Auto-Wrapping + +Pass a raw Zod schema instead of an explicit `Output` strategy -- the framework wraps it automatically: + +```ts +// Equivalent to Output.object({ schema }) +const summarizer = agent({ + name: "summarizer", + model: openai("gpt-4.1"), + system: "Summarize the input text.", + output: z.object({ + summary: z.string(), + keyPoints: z.array(z.string()), + }), +}); + +// Equivalent to Output.array({ element }) +const tagGenerator = agent({ + name: "tag-generator", + model: openai("gpt-4.1"), + system: "Generate tags for the input.", + output: z.array( + z.object({ + tag: z.string(), + relevance: z.number(), + }), + ), +}); +``` + +### Per-Call Output Override + 
+Override the output strategy for a single call via `AgentOverrides`: + +```ts +const result = await helper.generate({ + prompt: "List three TypeScript features", + output: z.object({ + features: z.array(z.string()), + }), +}); +``` + +### Validation Error Handling + +When the model's output does not match the schema, the result contains a `VALIDATION_ERROR`: + +```ts +const result = await analyzer.generate({ prompt: "Analyze this" }); + +if (!result.ok && result.error.code === "VALIDATION_ERROR") { + console.error("Output did not match schema:", result.error.message); + if (result.error.cause) { + console.error("Zod error:", result.error.cause); + } +} +``` + +## References + +- [Create an Agent](create-agent.md) +- [Overview](overview.md) diff --git a/packages/agents/docs/overview.md b/packages/agents/docs/overview.md index 24217c6..981cdab 100644 --- a/packages/agents/docs/overview.md +++ b/packages/agents/docs/overview.md @@ -1,18 +1,6 @@ # Agent SDK -`@funkai/agents` is a lightweight agent orchestration framework built on the [Vercel AI SDK](https://ai-sdk.dev). It provides typed primitives for creating AI agents, tools, and multi-step workflows with observable execution traces. 
- -## Design Principles - -| Principle | Description | -| ------------------------------ | ------------------------------------------------------------------------------------------ | -| Functions all the way down | `agent()`, `tool()`, `workflow()` return plain objects, no classes | -| Composition over configuration | Combine small functions instead of large option bags | -| Closures are state | Workflow state is just variables in your handler | -| Result, never throw | Every public method returns `Result`, callers pattern-match | -| Zero hidden state | No singletons, no module-level registries | -| `$` is optional sugar | The `$` helpers register data flow for observability; you can always use plain `for` loops | -| Context is internal | The framework tracks execution state automatically | +`@funkai/agents` is a lightweight agent orchestration framework built on the [Vercel AI SDK](https://ai-sdk.dev). It provides typed primitives for creating AI agents, tools, and multi-step flow agents with observable execution traces. 
## Architecture @@ -40,8 +28,8 @@ flowchart LR subgraph core [" "] tool["tool()"]:::coreNode agent["agent()"]:::coreNode - workflow["workflow()"]:::coreNode - engine["createWorkflowEngine()"]:::coreNode + flowAgent["flowAgent()"]:::coreNode + engine["createFlowEngine()"]:::coreNode end subgraph steps [" "] @@ -54,191 +42,54 @@ flowchart LR concOp["$.all / $.race"]:::step end - subgraph provider [" "] - direction LR - OpenRouter:::gateway - Model:::gateway - end - Input --> agent - Input --> workflow + Input --> flowAgent agent -- ".generate() / .stream()" --> Result:::coreNode - workflow -- ".generate() / .stream()" --> Result - engine --> workflow - workflow --> dollar + flowAgent -- ".generate() / .stream()" --> Result + engine --> flowAgent + flowAgent --> dollar dollar --> stepOp & agentOp & mapOp & reduceOp & concOp agentOp --> agent tool --> agent - agent --> OpenRouter --> Model classDef external fill:#313244,stroke:#f5c2e7,stroke-width:2px,color:#cdd6f4 classDef coreNode fill:#313244,stroke:#89b4fa,stroke-width:2px,color:#cdd6f4 classDef step fill:#313244,stroke:#a6e3a1,stroke-width:2px,color:#cdd6f4 - classDef gateway fill:#313244,stroke:#fab387,stroke-width:2px,color:#cdd6f4 style core fill:#181825,stroke:#89b4fa,stroke-width:2px style steps fill:#181825,stroke:#a6e3a1,stroke-width:2px - style provider fill:none,stroke:#fab387,stroke-width:2px,stroke-dasharray:5 5 -``` - -## Core Concepts - -### `tool()` - -Create tools for AI agent function calling. Wraps the AI SDK's `tool()` with `zodSchema()` conversion. - -```ts -const fetchPage = tool({ - description: "Fetch the contents of a web page by URL", - inputSchema: z.object({ url: z.url() }), - execute: async ({ url }) => { - const res = await fetch(url); - return { url, status: res.status, body: await res.text() }; - }, -}); -``` - -### `agent()` - -Create an agent with typed input, prompt template, tools, subagents, hooks, and `Result` return. 
Two modes: - -| Config | `.generate()` first param | How the prompt is built | -| ---------------------- | ------------------------- | ------------------------------ | -| `input` + `prompt` set | Typed `TInput` | `prompt({ input })` renders it | -| Both omitted | `string \| Message[]` | Passed directly to the model | - -```ts -const summarizer = agent({ - name: "summarizer", - model: "openai/gpt-4.1", - input: z.object({ text: z.string() }), - prompt: ({ input }) => `Summarize:\n\n${input.text}`, -}); - -const result = await summarizer.generate({ text: "..." }); -if (result.ok) { - console.log(result.output); -} ``` -Every agent exposes `.generate()`, `.stream()`, and `.fn()`. - -### `workflow()` - -Create a workflow with typed I/O, `$` step builder, hooks, and execution trace. The handler IS the workflow -- state is just variables. - -```ts -const wf = workflow( - { - name: "analyze", - input: InputSchema, - output: OutputSchema, - }, - async ({ input, $ }) => { - const data = await $.step({ - id: "fetch-data", - execute: async () => fetchData(input.id), - }); - - const result = await $.agent({ - id: "analyze", - agent: myAgent, - input: { data: data.value }, - }); - - return { data: data.value, analysis: result.ok ? 
result.output : null }; - }, -); -``` - -The `$` step builder provides tracked operations: - -| Method | Description | -| ---------- | ------------------------------------------------------------- | -| `$.step` | Execute a single unit of work | -| `$.agent` | Execute an agent call as a tracked operation | -| `$.map` | Parallel map over items (with optional concurrency limit) | -| `$.each` | Sequential side effects, returns void | -| `$.reduce` | Sequential accumulation, each step depends on previous result | -| `$.while` | Conditional loop, runs while a condition holds | -| `$.all` | Heterogeneous concurrent operations (like `Promise.all`) | -| `$.race` | Concurrent operations, first to finish wins | - -### `createWorkflowEngine()` - -Create a custom workflow factory that adds additional step types to `$` and/or sets default hooks. - -```ts -const engine = createWorkflowEngine({ - $: { - retry: async ({ ctx, config }) => { - // custom step implementation with access to ExecutionContext - }, - }, - onStart: ({ input }) => telemetry.trackStart(input), -}); -``` - -## Key Types - -### Result - -Every public method returns `Result` instead of throwing: - -```ts -type Result = (T & { ok: true }) | { ok: false; error: ResultError }; -``` - -Error codes: `VALIDATION_ERROR`, `AGENT_ERROR`, `WORKFLOW_ERROR`, `ABORT_ERROR`. Helpers: `ok()`, `err()`, `isOk()`, `isErr()`. - -### Runnable - -Both `Agent` and `Workflow` satisfy the `Runnable` interface, enabling composition. Subagents passed to `agent({ agents })` are automatically wrapped as callable tools. - -### Context - -Internal -- never exposed to users. The framework creates it automatically. Custom step factories (via `createWorkflowEngine`) receive `ExecutionContext` with `signal` and `log`. - -### Logger - -Pino-compatible interface with `child()` support. The framework creates scoped child loggers at each boundary (workflow, step, agent). - -## Provider - -OpenRouter integration for model resolution. 
The `Model` type accepted by `agent()` is `string | LanguageModel` -- string IDs are resolved via OpenRouter at runtime, or pass any AI SDK provider instance directly. - -| Export | Description | -| ---------------------------- | ---------------------------------------------------------------- | -| `openrouter(modelId)` | Returns a `LanguageModel` (cached provider, reused across calls) | -| `createOpenRouter(options?)` | Create a new OpenRouter provider instance | -| `model(id)` | Look up a `ModelDefinition` by ID (throws if not found) | -| `tryModel(id)` | Look up a `ModelDefinition` by ID (returns `undefined`) | -| `models(filter?)` | Return all model definitions, optionally filtered | - -## Execution Trace - -Workflows produce a frozen `TraceEntry[]` tree representing every tracked `$` operation: - -| Field | Type | Description | -| ------------ | --------------- | ------------------------------------------------------------------- | -| `id` | `string` | Step ID from the `$` config | -| `type` | `OperationType` | `step`, `agent`, `map`, `each`, `reduce`, `while`, `all`, or `race` | -| `startedAt` | `number` | Unix milliseconds | -| `finishedAt` | `number?` | Unix milliseconds (undefined while running) | -| `error` | `Error?` | Present on failure | -| `usage` | `TokenUsage?` | Token usage (populated for successful agent steps) | -| `children` | `TraceEntry[]?` | Nested operations (iterations, sub-steps) | - -## References - -- [Agent](core/agent.md) -- [Workflow](core/workflow.md) -- [Step Builder ($)](core/step.md) -- [Tools](core/tools.md) -- [Hooks](core/hooks.md) -- [Provider](provider/overview.md) -- [Models](provider/models.md) -- [Token Usage](provider/usage.md) -- [Create an Agent](guides/create-agent.md) -- [Create a Workflow](guides/create-workflow.md) -- [Create a Tool](guides/create-tool.md) +## Primitives + +| Primitive | Description | +| --------------------------------------------- | 
-------------------------------------------------------------------------------- | +| [`agent()`](create-agent.md) | Create an AI agent with typed I/O, tools, subagents, hooks, and `Result` return | +| [`flowAgent()`](create-flow-agent.md) | Create a multi-step orchestration flow with `$` step builder and execution trace | +| [`tool()`](tools.md) | Create tools for AI agent function calling | +| [`createFlowEngine()`](custom-flow-engine.md) | Create a flow agent factory with custom step types and shared hooks | + +## Key Concepts + +- **Result, never throw** -- Every public method returns `Result`. Pattern-match on `ok` instead of try/catch. +- **LanguageModel instances** -- Pass AI SDK provider instances directly: `model: openai("gpt-4.1")`. Use any `@ai-sdk/*` package. +- **$ is optional sugar** -- The `$` helpers register data flow for observability; plain imperative code works too. +- **Closures are state** -- Flow agent state is just `let` variables in your handler. + +## Documentation + +| Topic | Description | +| --------------------------------------------------------- | -------------------------------------------------------------------- | +| [Create an Agent](create-agent.md) | Build agents with typed I/O, tools, output strategies, and streaming | +| [Create a Flow Agent](create-flow-agent.md) | Build multi-step flows with `$` operations and execution traces | +| [Step Builder ($)](step-builder.md) | Reference for all 8 `$` methods | +| [Tools](tools.md) | Create and register tools for function calling | +| [Hooks](hooks.md) | Lifecycle callbacks for agents and flow agents | +| [Streaming](streaming.md) | Stream consumption patterns and StreamPart events | +| [Middleware](middleware.md) | Wrap language models with AI SDK middleware | +| [Output Strategies](output-strategies.md) | Structured output with Output.text/object/array/choice | +| [Custom Flow Engine](custom-flow-engine.md) | Build custom step types with createFlowEngine() | +| 
[Testing](test-agents.md) | Patterns for testing agents and flow agents | +| [Cost Tracking](cost-tracking.md) | Track token usage and calculate costs | +| [Error Recovery](error-recovery.md) | Retry, fallback, and circuit breaker patterns | +| [Multi-Agent Orchestration](multi-agent-orchestration.md) | Sequential, parallel, voting, and hierarchical patterns | diff --git a/packages/agents/docs/provider/models.md b/packages/agents/docs/provider/models.md index a5eaf10..38dcbd3 100644 --- a/packages/agents/docs/provider/models.md +++ b/packages/agents/docs/provider/models.md @@ -1,18 +1,17 @@ # Models -The SDK includes a model catalog with metadata and pricing for supported OpenRouter models. Used for cost calculation and model selection. - -Model data is auto-generated from the OpenRouter API. Run `pnpm --filter=@funkai/agents generate:models` to refresh. +For model metadata, pricing, and catalog lookups, use the `@funkai/models` package. ## Model Definition Each model entry has: -| Field | Type | Description | -| ---------- | --------------- | --------------------------------------------- | -| `id` | `string` | OpenRouter model ID (e.g. `'openai/gpt-4.1'`) | -| `category` | `ModelCategory` | `'chat'`, `'coding'`, or `'reasoning'` | -| `pricing` | `ModelPricing` | Per-token rates (OpenRouter convention) | +| Field | Type | Description | +| -------------- | ------------------- | --------------------------------------------- | +| `id` | `string` | Model ID (e.g. `'openai/gpt-4.1'`) | +| `capabilities` | `ModelCapabilities` | Boolean flags (reasoning, tools, vision, etc) | +| `pricing` | `ModelPricing` | Per-token rates in USD | +| `modalities` | `ModelModalities` | Input/output modality descriptors | ## Pricing @@ -28,28 +27,47 @@ Each model entry has: ## Lookup -Three functions: - -- `model(id)` -- Returns a single `ModelDefinition` or throws if the ID is not in the catalog. 
-- `tryModel(id)` -- Returns a single `ModelDefinition` or `undefined` if the ID is not in the catalog. -- `models(filter?)` -- Returns model definitions, optionally filtered by a predicate. - ```ts -import { model, tryModel, models } from "@funkai/agents"; +import { model, models } from "@funkai/models"; -const m = model("openai/gpt-4.1"); -console.log(m.pricing.prompt); // cost per input token -console.log(m.category); // 'chat' +// Look up a single model (returns null if not found) +const gpt4 = model("openai/gpt-4.1"); +if (gpt4) { + console.log(gpt4.pricing.prompt); // cost per input token +} +// List all models, optionally filtered const all = models(); -const reasoning = models((m) => m.category === "reasoning"); +const reasoning = models((m) => m.capabilities.reasoning); ``` -## Adding a Model +## Using with Agents -Add an entry to `models.config.json` at the package root with the OpenRouter model ID and category, then run `pnpm --filter=@funkai/agents generate:models` to fetch pricing from the API. 
+Pass AI SDK provider instances directly to agents -- model catalog lookups are separate from model resolution: + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { model, calculateCost } from "@funkai/models"; + +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are helpful.", +}); + +const result = await helper.generate({ prompt: "Hello" }); +if (result.ok) { + const pricing = model("openai/gpt-4.1")?.pricing; + if (pricing) { + const cost = calculateCost(result.usage, pricing); + console.log(`Cost: $${cost.total.toFixed(6)}`); + } +} +``` ## References - [Provider Overview](overview.md) - [Token Usage](usage.md) +- [@funkai/models docs](/models) diff --git a/packages/agents/docs/provider/overview.md b/packages/agents/docs/provider/overview.md index 012d46b..564bb3e 100644 --- a/packages/agents/docs/provider/overview.md +++ b/packages/agents/docs/provider/overview.md @@ -1,106 +1,91 @@ # Provider Overview -The provider module integrates with OpenRouter for model access and provides a model catalog with pricing data. +Agents require an AI SDK `LanguageModel` instance for their `model` field. The `@funkai/agents` package does not bundle any provider -- you bring your own from the AI SDK ecosystem. -## OpenRouter +## Passing a Model -All models are accessed via OpenRouter. The `openrouter()` function creates a language model from a model ID. +Use any `@ai-sdk/*` provider package to create a language model instance: ```ts -import { openrouter } from "@funkai/agents"; +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; -const m = openrouter("openai/gpt-4.1"); +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", +}); ``` -The provider instance is cached at module scope and reused across calls. If `OPENROUTER_API_KEY` changes at runtime, the cache is invalidated and a new provider is created. 
+## Supported Providers -## API Key +Any package that returns an AI SDK v3 `LanguageModel` works: -Resolved from the `OPENROUTER_API_KEY` environment variable. Throws if not set. +| Package | Example | +| ----------------------------- | --------------------------------------- | +| `@ai-sdk/openai` | `openai("gpt-4.1")` | +| `@ai-sdk/anthropic` | `anthropic("claude-sonnet-4-20250514")` | +| `@ai-sdk/google` | `google("gemini-2.5-pro")` | +| `@openrouter/ai-sdk-provider` | `createOpenRouter()("openai/gpt-4.1")` | -```ts -// Override with a custom provider instance -import { createOpenRouter } from "@funkai/agents"; +## Dynamic Model Resolution + +Use a resolver function to pick models at runtime: -const provider = createOpenRouter({ apiKey: "sk-..." }); -const m = provider("openai/gpt-4.1"); +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { anthropic } from "@ai-sdk/anthropic"; + +const helper = agent({ + name: "helper", + model: ({ input }) => + input.fast ? openai("gpt-4.1-mini") : anthropic("claude-sonnet-4-20250514"), + input: z.object({ fast: z.boolean() }), + system: "You are a helpful assistant.", +}); ``` ## Model Catalog -Models are defined in `models.config.json` and auto-generated into provider-specific files. Use the catalog functions to look up model definitions and pricing. 
+For model metadata and pricing, use `@funkai/models`: ```ts -import { model, tryModel, models } from "@funkai/agents"; +import { model, models } from "@funkai/models"; -// Look up a model (throws if not found) const gpt4 = model("openai/gpt-4.1"); -console.log(gpt4.pricing.prompt); // cost per input token +console.log(gpt4?.pricing.prompt); // cost per input token -// Safe lookup (returns undefined if not found) -const maybe = tryModel("openai/gpt-4.1"); - -// List all models, optionally filtered -const allModels = models(); -const reasoningModels = models((m) => m.category === "reasoning"); +const reasoning = models((m) => m.capabilities.reasoning); ``` ## Token Usage -Aggregate token counts across agent and workflow executions. +Aggregate token counts across agent and flow agent executions: ```ts -import { agentUsage, workflowUsage } from "@funkai/agents"; - -// Single agent usage -const usage = agentUsage("my-agent", tokenRecords); -console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens); - -// Workflow usage with per-agent breakdown -const wfUsage = workflowUsage(allTokenRecords); -for (const entry of wfUsage.usages) { - console.log(`${entry.agentId}: ${entry.totalTokens} tokens`); +import { usage, usageByAgent, usageByModel, collectUsages } from "@funkai/agents"; + +const result = await myFlowAgent.generate({ input: { topic: "closures" } }); +if (result.ok) { + const records = collectUsages(result.trace); + const total = usage(records); + const byAgent = usageByAgent(records); + const byModel = usageByModel(records); } ``` ## Exports -| Export | Description | -| ------------------------------ | ----------------------------------------------------------------- | -| `openrouter(modelId)` | Create a language model from OpenRouter (cached provider) | -| `createOpenRouter(options?)` | Create a custom OpenRouter provider instance | -| `model(id)` | Look up a model definition from the catalog (throws if not found) | -| `tryModel(id)` | Look up a model 
definition (returns `undefined` if not found) | -| `models(filter?)` | Get model definitions, optionally filtered by predicate | -| `agentUsage(agentId, records)` | Aggregate token counts for a single agent | -| `workflowUsage(records)` | Aggregate token counts for a workflow with per-agent breakdown | - -## Model Reference - -String model IDs passed to `agent()` or `openrouter()` are resolved via OpenRouter at runtime. You can also pass an AI SDK `LanguageModel` instance directly. - -```ts -import { agent } from "@funkai/agents"; - -// String ID -- resolved via OpenRouter -const a1 = agent({ - name: "my-agent", - model: "openai/gpt-4.1", - system: "You are helpful.", -}); - -// AI SDK provider instance -- bypasses OpenRouter -import { openai } from "@ai-sdk/openai"; - -const a2 = agent({ - name: "my-agent", - model: openai("gpt-4.1"), - system: "You are helpful.", -}); -``` +| Export | Description | +| ----------------------- | ----------------------------------------------- | +| `usage(records)` | Sum all token usage records into a single total | +| `usageByAgent(records)` | Group and sum usage by agent ID | +| `usageByModel(records)` | Group and sum usage by model ID | +| `collectUsages(trace)` | Walk a trace tree and collect all usage records | ## References - [Models](models.md) +- [Token Usage](usage.md) - [Create an Agent](../guides/create-agent.md) -- [Troubleshooting](../troubleshooting.md) diff --git a/packages/agents/docs/provider/usage.md b/packages/agents/docs/provider/usage.md index 47031e9..2c10f61 100644 --- a/packages/agents/docs/provider/usage.md +++ b/packages/agents/docs/provider/usage.md @@ -1,6 +1,6 @@ # Token Usage -Token tracking and aggregation for agent and workflow executions. +Token tracking and aggregation for agent and flow agent executions. ## TokenUsageRecord @@ -8,7 +8,7 @@ Raw tracking record from a single model invocation. 
Fields are `number | undefin | Field | Type | Description | | ------------------ | --------------------- | ---------------------------------------- | -| `modelId` | `string` | OpenRouter model ID | +| `modelId` | `string` | Model ID | | `inputTokens` | `number \| undefined` | Input (prompt) tokens | | `outputTokens` | `number \| undefined` | Output (completion) tokens | | `totalTokens` | `number \| undefined` | Input + output | @@ -21,37 +21,58 @@ The `source` field identifies which component produced the record: ```ts source?: { - workflowId?: string + flowAgentId?: string stepId?: string agentId: string scope: string[] } ``` -## Agent Usage +## Usage Aggregation -`agentUsage()` aggregates token counts from one or more raw records into a flat `AgentTokenUsage` object. +### `usage()` + +Sum all token usage records into a single flat `TokenUsage`: ```ts -import { agentUsage } from "@funkai/agents"; +import { usage, collectUsages } from "@funkai/agents"; -const usage = agentUsage("my-agent", records); -console.log(usage.agentId); // 'my-agent' -console.log(usage.inputTokens); // resolved number (0 if undefined) -console.log(usage.outputTokens); -console.log(usage.totalTokens); +const result = await myFlowAgent.generate({ input: { topic: "closures" } }); +if (result.ok) { + const total = usage(collectUsages(result.trace)); + console.log(total.inputTokens, total.outputTokens, total.totalTokens); +} ``` -## Workflow Usage +### `usageByAgent()` -`workflowUsage()` groups records by `source.agentId` and computes per-agent usage. 
+Group records by agent ID and compute per-agent usage: ```ts -import { workflowUsage } from "@funkai/agents"; +import { usageByAgent, collectUsages } from "@funkai/agents"; -const usage = workflowUsage(allRecords); -for (const entry of usage.usages) { - console.log(`${entry.agentId}: ${entry.totalTokens} tokens`); +const result = await myFlowAgent.generate({ input: { topic: "closures" } }); +if (result.ok) { + const byAgent = usageByAgent(collectUsages(result.trace)); + for (const entry of byAgent) { + console.log(`${entry.source.agentId}: ${entry.totalTokens} tokens`); + } +} +``` + +### `usageByModel()` + +Group records by model ID and compute per-model usage: + +```ts +import { usageByModel, collectUsages } from "@funkai/agents"; + +const result = await myFlowAgent.generate({ input: { topic: "closures" } }); +if (result.ok) { + const byModel = usageByModel(collectUsages(result.trace)); + for (const entry of byModel) { + console.log(`${entry.modelId}: ${entry.totalTokens} tokens`); + } } ``` @@ -68,29 +89,16 @@ The aggregated output type. All fields are resolved `number` (0 when the raw rec | `cacheWriteTokens` | `number` | Cache write tokens | | `reasoningTokens` | `number` | Internal reasoning tokens | -## Usage Utilities - -### `sumTokenUsage()` - -Sum multiple `TokenUsage` objects into a new one. Pure function, does not mutate inputs. - -```ts -import { sumTokenUsage } from "@funkai/agents"; - -const total = sumTokenUsage([usageA, usageB, usageC]); -``` - -### `collectUsages()` +## `collectUsages()` -Walk a `TraceEntry[]` tree and collect all `usage` values into a flat array (recursively including children). Compose with `sumTokenUsage()` to aggregate usage across all `$.agent()` calls. +Walk a `TraceEntry[]` tree and collect all `usage` values into a flat array (recursively including children). Compose with `usage()` to aggregate across all operations. 
```ts -import { collectUsages, sumTokenUsage } from "@funkai/agents"; +import { collectUsages, usage } from "@funkai/agents"; -const result = await myWorkflow.generate(input); +const result = await myFlowAgent.generate({ input: { topic: "closures" } }); if (result.ok) { - // result.usage is already computed, but you can also derive it from the trace: - const usage = sumTokenUsage(collectUsages(result.trace)); + const total = usage(collectUsages(result.trace)); } ``` diff --git a/packages/agents/docs/reference/output-strategies.md b/packages/agents/docs/reference/output-strategies.md index 874ee87..5f33f2c 100644 --- a/packages/agents/docs/reference/output-strategies.md +++ b/packages/agents/docs/reference/output-strategies.md @@ -75,11 +75,11 @@ When `output` is omitted, agents produce plain string output: ```ts const helper = agent({ name: "helper", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "You are helpful.", }); -const result = await helper.generate("What is TypeScript?"); +const result = await helper.generate({ prompt: "What is TypeScript?" }); if (result.ok) { console.log(result.output); // string } @@ -91,10 +91,11 @@ Produce a validated structured object: ```ts import { Output } from "ai"; +import { openai } from "@ai-sdk/openai"; const analyzer = agent({ name: "analyzer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "Analyze the sentiment of the given text.", output: Output.object({ schema: z.object({ @@ -105,7 +106,7 @@ const analyzer = agent({ }), }); -const result = await analyzer.generate("I love this product!"); +const result = await analyzer.generate({ prompt: "I love this product!" 
}); if (result.ok) { console.log(result.output.sentiment); // "positive" console.log(result.output.confidence); // 0.95 @@ -118,10 +119,11 @@ Produce a validated array of structured elements: ```ts import { Output } from "ai"; +import { openai } from "@ai-sdk/openai"; const extractor = agent({ name: "extractor", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "Extract all entities from the text.", output: Output.array({ element: z.object({ @@ -131,7 +133,7 @@ const extractor = agent({ }), }); -const result = await extractor.generate("Alice works at Acme Corp in New York."); +const result = await extractor.generate({ prompt: "Alice works at Acme Corp in New York." }); if (result.ok) { for (const entity of result.output) { console.log(entity.name, entity.type); @@ -145,17 +147,18 @@ Classify input into one of a set of options: ```ts import { Output } from "ai"; +import { openai } from "@ai-sdk/openai"; const classifier = agent({ name: "classifier", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "Classify the support ticket priority.", output: Output.choice({ options: ["low", "medium", "high", "critical"] as const, }), }); -const result = await classifier.generate("Server is completely down"); +const result = await classifier.generate({ prompt: "Server is completely down" }); if (result.ok) { console.log(result.output); // "critical" } @@ -169,7 +172,7 @@ Pass a raw Zod schema instead of an explicit `Output` strategy -- the framework // Equivalent to Output.object({ schema }) const summarizer = agent({ name: "summarizer", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "Summarize the input text.", output: z.object({ summary: z.string(), @@ -180,7 +183,7 @@ const summarizer = agent({ // Equivalent to Output.array({ element }) const tagGenerator = agent({ name: "tag-generator", - model: "openai/gpt-4.1", + model: openai("gpt-4.1"), system: "Generate tags for the input.", output: z.array( z.object({ @@ -196,7 +199,8 @@ const 
tagGenerator = agent({ Override the output strategy for a single call via `AgentOverrides`: ```ts -const result = await helper.generate("List three TypeScript features", { +const result = await helper.generate({ + prompt: "List three TypeScript features", output: z.object({ features: z.array(z.string()), }), @@ -208,7 +212,7 @@ const result = await helper.generate("List three TypeScript features", { When the model's output does not match the schema, the result contains a `VALIDATION_ERROR`: ```ts -const result = await analyzer.generate("Analyze this"); +const result = await analyzer.generate({ prompt: "Analyze this" }); if (!result.ok && result.error.code === "VALIDATION_ERROR") { console.error("Output did not match schema:", result.error.message); diff --git a/packages/agents/docs/research/experimental-context.md b/packages/agents/docs/research/experimental-context.md index 4f5acf1..bd75fad 100644 --- a/packages/agents/docs/research/experimental-context.md +++ b/packages/agents/docs/research/experimental-context.md @@ -40,7 +40,7 @@ generateText({ experimental_context: initialState }) ```typescript const result = await generateText({ - model: openrouter("anthropic/claude-sonnet-4"), + model: anthropic("claude-sonnet-4-20250514"), tools: { fetchData: tool({ description: "Fetch data from the API", diff --git a/packages/agents/docs/research/prepare-step-and-active-tools.md b/packages/agents/docs/research/prepare-step-and-active-tools.md index 03c44d4..8a663ec 100644 --- a/packages/agents/docs/research/prepare-step-and-active-tools.md +++ b/packages/agents/docs/research/prepare-step-and-active-tools.md @@ -105,7 +105,7 @@ prepareStep: async ({ steps }) => { const lastStep = steps.at(-1); // Switch to a more capable model if the task is complex if (lastStep?.toolCalls.length > 3) { - return { model: openrouter("anthropic/claude-sonnet-4") }; + return { model: anthropic("claude-sonnet-4-20250514") }; } return {}; }; diff --git 
a/packages/agents/docs/research/sub-agent-model.md b/packages/agents/docs/research/sub-agent-model.md
index 7db2218..e64da31 100644
--- a/packages/agents/docs/research/sub-agent-model.md
+++ b/packages/agents/docs/research/sub-agent-model.md
@@ -17,7 +17,7 @@ Sub-agents are declared in `AgentConfig.agents` and auto-wrapped into AI SDK too
 ```typescript
 const parent = agent({
   name: "orchestrator",
-  model: "anthropic/claude-sonnet-4",
+  model: anthropic("claude-sonnet-4-20250514"),
   tools: { search, readFile },
   agents: { researcher, coder }, // auto-wrapped as callable tools
 });
diff --git a/packages/agents/docs/step-builder.md b/packages/agents/docs/step-builder.md
new file mode 100644
index 0000000..87d2ad5
--- /dev/null
+++ b/packages/agents/docs/step-builder.md
@@ -0,0 +1,283 @@
+# $ StepBuilder
+
+The `$` object is passed into every flow agent handler and step callback. It provides tracked operations that register data flow in the execution trace. Every call through `$` becomes a `TraceEntry`.
+
+`$` is passed into every callback, enabling composition and nesting. You can always skip `$` and use plain imperative code -- it just will not appear in the trace.
+
+## StepResult
+
+All `$` methods return `Promise<StepResult<T>>`:
+
+```ts
+type StepResult<T> =
+  | { ok: true; value: T; step: StepInfo; duration: number }
+  | { ok: false; error: StepError; step: StepInfo; duration: number };
+```
+
+`StepInfo` identifies the step:
+
+```ts
+interface StepInfo {
+  id: string; // from the $ config's `id` field
+  index: number; // auto-incrementing within the flow agent
+  type: OperationType; // 'step' | 'agent' | 'map' | 'each' | 'reduce' | 'while' | 'all' | 'race'
+}
+```
+
+`StepError` extends `ResultError` with `stepId: string`.
+
+## $.step
+
+Single unit of work.
+ +```ts +$.step(config: StepConfig): Promise> +``` + +| Field | Required | Type | Description | +| ---------- | -------- | ------------------------------------------------------------ | ---------------------------- | +| `id` | Yes | `string` | Unique step identifier | +| `execute` | Yes | `(params: { $ }) => Promise` | The step's logic | +| `onStart` | No | `(event: { id }) => void \| Promise` | Hook: fires when step starts | +| `onFinish` | No | `(event: { id, result, duration }) => void \| Promise` | Hook: fires on success | +| `onError` | No | `(event: { id, error }) => void \| Promise` | Hook: fires on error | + +```ts +const data = await $.step({ + id: "fetch-data", + execute: async () => { + return await fetchData(); + }, +}); + +if (data.ok) { + console.log(data.value); // T +} +``` + +## $.agent + +Agent call as a tracked operation. Calls `agent.generate()` internally and unwraps the result -- agent errors become `StepError`, agent success becomes `StepResult`. + +```ts +$.agent(config: AgentStepConfig): Promise> +``` + +| Field | Required | Type | Description | +| ---------- | -------- | ------------------ | ------------------------------------ | +| `id` | Yes | `string` | Unique step identifier | +| `agent` | Yes | `Runnable` | The agent (or flow agent) to invoke | +| `input` | Yes | `TInput` | Input to pass to the agent | +| `config` | No | `AgentOverrides` | Inline overrides for this agent call | +| `onStart` | No | hook | Hook: fires when step starts | +| `onFinish` | No | hook | Hook: fires on success | +| `onError` | No | hook | Hook: fires on error | + +The framework automatically passes the abort signal and a scoped logger to the agent. 
+ +```ts +const result = await $.agent({ + id: "analyze", + agent: analyzerAgent, + input: { files: ["src/main.ts"] }, +}); + +if (result.ok) { + console.log(result.value.output); // the agent's output + console.log(result.value.messages); // full message history +} +``` + +## $.map + +Parallel map with optional concurrency limit. All items run concurrently (up to `concurrency` limit). Returns results in input order. + +```ts +$.map(config: MapConfig): Promise> +``` + +| Field | Required | Type | Description | +| ------------- | -------- | -------------------------------------------- | ------------------------------------------- | +| `id` | Yes | `string` | Unique step identifier | +| `input` | Yes | `T[]` | Array of items to process | +| `execute` | Yes | `(params: { item, index, $ }) => Promise` | Process a single item | +| `concurrency` | No | `number` | Max parallel executions (default: Infinity) | +| `onStart` | No | hook | Hook: fires when map starts | +| `onFinish` | No | hook | Hook: fires when all items complete | +| `onError` | No | hook | Hook: fires on error | + +```ts +const results = await $.map({ + id: "process-files", + input: files, + concurrency: 5, + execute: async ({ item, index }) => { + return await processFile(item); + }, +}); +``` + +## $.each + +Sequential side effects. Runs items one at a time in order. Returns `void`. Checks abort signal before each iteration. 
+ +```ts +$.each(config: EachConfig): Promise> +``` + +| Field | Required | Type | Description | +| ---------- | -------- | ----------------------------------------------- | ----------------------------- | +| `id` | Yes | `string` | Unique step identifier | +| `input` | Yes | `T[]` | Array of items to process | +| `execute` | Yes | `(params: { item, index, $ }) => Promise` | Process a single item | +| `onStart` | No | hook | Hook: fires when each starts | +| `onFinish` | No | hook | Hook: fires when all complete | +| `onError` | No | hook | Hook: fires on error | + +```ts +await $.each({ + id: "notify-users", + input: users, + execute: async ({ item }) => { + await sendNotification(item.email); + }, +}); +``` + +## $.reduce + +Sequential accumulation. Each step depends on the previous result. Checks abort signal before each iteration. + +```ts +$.reduce(config: ReduceConfig): Promise> +``` + +| Field | Required | Type | Description | +| ---------- | -------- | --------------------------------------------------------- | ------------------------------ | +| `id` | Yes | `string` | Unique step identifier | +| `input` | Yes | `T[]` | Array of items to reduce | +| `initial` | Yes | `R` | Initial accumulator value | +| `execute` | Yes | `(params: { item, accumulator, index, $ }) => Promise` | Reduce function | +| `onStart` | No | hook | Hook: fires when reduce starts | +| `onFinish` | No | hook | Hook: fires when done | +| `onError` | No | hook | Hook: fires on error | + +```ts +const total = await $.reduce({ + id: "sum-scores", + input: items, + initial: 0, + execute: async ({ item, accumulator }) => { + return accumulator + item.score; + }, +}); +``` + +## $.while + +Conditional loop. Runs while a condition holds. Returns the last value, or `undefined` if the condition was false on first check. Checks abort signal before each iteration. 
+ +```ts +$.while(config: WhileConfig): Promise> +``` + +| Field | Required | Type | Description | +| ----------- | -------- | --------------------------------------- | ---------------------------------------------- | +| `id` | Yes | `string` | Unique step identifier | +| `condition` | Yes | `(params: { value, index }) => boolean` | Loop condition (checked before each iteration) | +| `execute` | Yes | `(params: { index, $ }) => Promise` | Execute one iteration | +| `onStart` | No | hook | Hook: fires when while starts | +| `onFinish` | No | hook | Hook: fires when loop ends | +| `onError` | No | hook | Hook: fires on error | + +The `condition` receives the last iteration's value (or `undefined` before the first iteration) and the current iteration index. + +```ts +const result = await $.while({ + id: "poll-status", + condition: ({ value, index }) => index < 10 && value !== "complete", + execute: async ({ index }) => { + await sleep(1000); + return await checkStatus(); + }, +}); +``` + +## $.all + +Concurrent heterogeneous operations -- like `Promise.all`. Entries are factory functions that receive an `AbortSignal` and return a promise. The framework creates an `AbortController`, links it to the parent signal, and starts all factories at the same time. + +```ts +$.all(config: AllConfig): Promise> +``` + +| Field | Required | Type | Description | +| ---------- | -------- | ---------------- | ------------------------------------- | +| `id` | Yes | `string` | Unique step identifier | +| `entries` | Yes | `EntryFactory[]` | Factory functions to run concurrently | +| `onStart` | No | hook | Hook: fires when all starts | +| `onFinish` | No | hook | Hook: fires when all complete | +| `onError` | No | hook | Hook: fires on error | + +Where `EntryFactory = (signal: AbortSignal, $: StepBuilder) => Promise`. 
+ +```ts +const result = await $.all({ + id: "fetch-data", + entries: [(signal) => fetchUsers(signal), (signal) => fetchRepos(signal)], +}); + +if (result.ok) { + const [users, repos] = result.value; +} +``` + +## $.race + +First-to-finish wins. Same `entries: EntryFactory[]` pattern as `$.all`. Losers are cancelled via abort signal when the winner resolves. + +```ts +$.race(config: RaceConfig): Promise> +``` + +| Field | Required | Type | Description | +| ---------- | -------- | ---------------- | -------------------------------- | +| `id` | Yes | `string` | Unique step identifier | +| `entries` | Yes | `EntryFactory[]` | Factory functions to race | +| `onStart` | No | hook | Hook: fires when race starts | +| `onFinish` | No | hook | Hook: fires when winner resolves | +| `onError` | No | hook | Hook: fires on error | + +```ts +const result = await $.race({ + id: "fastest-provider", + entries: [(signal) => fetchFromProviderA(signal), (signal) => fetchFromProviderB(signal)], +}); + +if (result.ok) { + const fastest = result.value; +} +``` + +## Nesting + +`$` is passed into every callback so you can nest operations freely. Nested operations appear as `children` in the parent's trace entry. + +```ts +const result = await $.step({ + id: "outer", + execute: async ({ $ }) => { + const inner = await $.step({ + id: "inner", + execute: async () => "nested value", + }); + return inner.ok ? inner.value : "fallback"; + }, +}); +``` + +## References + +- [Flow Agent](create-flow-agent.md) +- [Hooks](hooks.md) +- [Overview](overview.md) diff --git a/packages/agents/docs/streaming.md b/packages/agents/docs/streaming.md new file mode 100644 index 0000000..4634e78 --- /dev/null +++ b/packages/agents/docs/streaming.md @@ -0,0 +1,237 @@ +# Streaming + +Streaming lets consumers process generation output incrementally as it arrives, rather than waiting for completion. 
Both `Agent` and `FlowAgent` support streaming via `.stream()`, returning a `StreamResult` with a live `fullStream` of typed events. + +## Architecture + +```mermaid +%%{init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#313244', + 'primaryTextColor': '#cdd6f4', + 'primaryBorderColor': '#6c7086', + 'lineColor': '#89b4fa', + 'secondaryColor': '#45475a', + 'tertiaryColor': '#1e1e2e', + 'actorBkg': '#313244', + 'actorBorder': '#89b4fa', + 'actorTextColor': '#cdd6f4', + 'signalColor': '#cdd6f4', + 'signalTextColor': '#cdd6f4' + } +}}%% +sequenceDiagram + participant C as Consumer + participant A as Agent + participant M as Model + + C->>A: agent.stream(input) + A-->>C: Result + + rect rgb(49, 50, 68) + Note over C,M: Stream consumption + M-->>A: text-delta + A-->>C: StreamPart (text-delta) + M-->>A: tool-call + A-->>C: StreamPart (tool-call) + M-->>A: tool-result + A-->>C: StreamPart (tool-result) + M-->>A: finish + A-->>C: StreamPart (finish) + end + + C->>C: await result.output + C->>C: await result.messages +``` + +## Key Concepts + +### StreamResult + +Returned by `.stream()` inside a `Result` wrapper. The `fullStream` is available immediately; other fields are promises that resolve after the stream completes. + +```ts +interface StreamResult { + output: Promise; + messages: Promise; + usage: Promise; + finishReason: Promise; + fullStream: AsyncIterableStream; +} +``` + +| Field | Type | When Available | +| -------------- | --------------------------------- | ---------------------- | +| `fullStream` | `AsyncIterableStream` | Immediately | +| `output` | `Promise` | After stream completes | +| `messages` | `Promise` | After stream completes | +| `usage` | `Promise` | After stream completes | +| `finishReason` | `Promise` | After stream completes | + +### StreamPart Events + +The `fullStream` emits `StreamPart` events -- a discriminated union from the AI SDK (`TextStreamPart`). 
Use `part.type` to discriminate: + +| `type` | Description | Key Fields | +| --------------- | -------------------------- | ----------------------- | +| `"text-delta"` | Incremental text output | `textDelta: string` | +| `"tool-call"` | Model invoked a tool | `toolName`, `args` | +| `"tool-result"` | Tool execution completed | `toolName`, `result` | +| `"step-finish"` | A tool-loop step completed | `usage`, `finishReason` | +| `"finish"` | Generation completed | `usage`, `finishReason` | +| `"error"` | An error occurred | `error` | + +### Dual Consumption + +`AsyncIterableStream` supports both `for await...of` and `.getReader()`: + +```ts +// AsyncIterable +for await (const part of result.fullStream) { + // handle part +} + +// ReadableStream +const reader = result.fullStream.getReader(); +while (true) { + const { done, value } = await reader.read(); + if (done) break; + // handle value +} +``` + +## Usage + +### Basic Streaming + +```ts +const result = await myAgent.stream({ prompt: "Tell me a story" }); + +if (!result.ok) { + console.error(result.error.message); + return; +} + +for await (const part of result.fullStream) { + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } +} + +const finalOutput = await result.output; +``` + +### Handling Multiple Event Types + +```ts +import { match } from "ts-pattern"; + +const result = await myAgent.stream({ prompt: "Search and summarize" }); +if (!result.ok) return; + +for await (const part of result.fullStream) { + match(part) + .with({ type: "text-delta" }, (p) => { + process.stdout.write(p.textDelta); + }) + .with({ type: "tool-call" }, (p) => { + console.log(`Calling tool: ${p.toolName}`); + }) + .with({ type: "tool-result" }, (p) => { + console.log(`Tool ${p.toolName} returned result`); + }) + .with({ type: "error" }, (p) => { + console.error("Stream error:", p.error); + }) + .otherwise(() => {}); +} +``` + +### Streaming with Flow Agents + +Flow agents support streaming via `$.agent()` with 
`stream: true`. When the flow agent itself is invoked via `.stream()`, agent steps configured with `stream: true` pipe their text output through the parent flow's stream: + +```ts +const pipeline = flowAgent( + { + name: "content-pipeline", + input: z.object({ topic: z.string() }), + output: z.object({ article: z.string() }), + }, + async ({ input, $ }) => { + const research = await $.agent({ + id: "research", + agent: researcher, + input: input.topic, + stream: true, + }); + + if (!research.ok) throw new Error("Research failed"); + + return { article: research.value.output }; + }, +); + +const result = await pipeline.stream({ input: { topic: "TypeScript patterns" } }); +if (result.ok) { + for await (const part of result.fullStream) { + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } + } +} +``` + +### Error Handling + +Errors in the stream can appear as `StreamPart` events or as rejected promises on the result fields: + +```ts +const result = await myAgent.stream({ prompt: "Generate content" }); +if (!result.ok) { + console.error("Failed to start stream:", result.error.message); + return; +} + +try { + for await (const part of result.fullStream) { + if (part.type === "error") { + console.error("Stream error:", part.error); + } + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } + } +} catch (err) { + console.error("Stream iteration failed:", err); +} +``` + +### Cancellation + +Pass an `AbortSignal` to cancel streaming: + +```ts +const controller = new AbortController(); + +const result = await myAgent.stream({ + prompt: "Long generation", + signal: controller.signal, +}); + +if (result.ok) { + setTimeout(() => controller.abort(), 5000); + + for await (const part of result.fullStream) { + if (part.type === "text-delta") { + process.stdout.write(part.textDelta); + } + } +} +``` + +## References + +- [`agent()` reference](/reference/agents/agent) +- [`flowAgent()` reference](/reference/agents/flow-agent) diff 
--git a/packages/agents/docs/test-agents.md b/packages/agents/docs/test-agents.md new file mode 100644 index 0000000..239a42b --- /dev/null +++ b/packages/agents/docs/test-agents.md @@ -0,0 +1,374 @@ +# Test Agents and Flow Agents + +Patterns for unit testing agents, flow agents, and tools with mocked models and deterministic assertions. + +## Prerequisites + +- `@funkai/agents` installed +- Vitest configured (`pnpm test --filter=@funkai/agents`) +- Familiarity with `agent()`, `flowAgent()`, and `tool()` APIs + +## Steps + +### 1. Use per-call model overrides for integration smoke tests + +Agents accept a `model` override on each `.generate()` call. This is useful for low-cost integration smoke tests. For deterministic unit tests, use a fixed mock model/test double. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; +import { describe, it, expect } from "vitest"; +import { simulateReadableStream } from "ai"; + +const summarizer = agent({ + name: "summarizer", + model: openai("gpt-4.1"), + input: z.object({ text: z.string() }), + prompt: ({ input }) => `Summarize:\n\n${input.text}`, +}); + +describe("summarizer", () => { + it("returns a summary", async () => { + // Create a mock model that returns a fixed response + const mockModel = { + doGenerate: async () => ({ + text: "This is a fixed test summary", + finishReason: "stop", + usage: { promptTokens: 10, completionTokens: 5 }, + }), + }; + + const result = await summarizer.generate({ + input: { text: "Long article content..." }, + model: mockModel as any, + }); + + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.output).toBe("This is a fixed test summary"); + } + }); +}); +``` + +### 2. Assert on Result shape + +Every agent and flow agent returns `Result`. Test both success and error paths by checking `result.ok`. 
+ +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { describe, it, expect } from "vitest"; + +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "You are a helpful assistant.", +}); + +describe("helper", () => { + it("succeeds with a string output", async () => { + const result = await helper.generate({ prompt: "What is TypeScript?" }); + + if (result.ok) { + expect(result.output).toBeDefined(); + expect(result.messages.length).toBeGreaterThan(0); + expect(result.usage.totalTokens).toBeGreaterThanOrEqual(0); + } + }); + + it("fails gracefully on abort", async () => { + const controller = new AbortController(); + controller.abort(); + + const result = await helper.generate({ + prompt: "This will be cancelled", + signal: controller.signal, + }); + + expect(result.ok).toBe(false); + if (!result.ok) { + expect(result.error.code).toBeDefined(); + } + }); +}); +``` + +### 3. Test typed agents with structured output + +When an agent has an `output` schema, assert on the typed shape of `result.output`. 
+ +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; +import { describe, it, expect } from "vitest"; + +const classifier = agent({ + name: "classifier", + model: openai("gpt-4.1"), + output: z.object({ + category: z.enum(["bug", "feature", "question"]), + confidence: z.number(), + }), + input: z.object({ title: z.string(), body: z.string() }), + prompt: ({ input }) => `Classify this issue:\n\nTitle: ${input.title}\nBody: ${input.body}`, +}); + +describe("classifier", () => { + it("returns structured output matching the schema", async () => { + const result = await classifier.generate({ + input: { + title: "App crashes on login", + body: "When I click the login button, the app crashes.", + }, + }); + + if (result.ok) { + expect(["bug", "feature", "question"]).toContain(result.output.category); + expect(result.output.confidence).toBeGreaterThanOrEqual(0); + expect(result.output.confidence).toBeLessThanOrEqual(1); + } + }); +}); +``` + +### 4. Test tools in isolation + +Tools are plain functions with input validation. Test them independently of any agent by calling `execute` directly. + +```ts +import { tool } from "@funkai/agents"; +import { z } from "zod"; +import { describe, it, expect } from "vitest"; + +const add = async ({ a, b }: { a: number; b: number }) => ({ result: a + b }); + +const calculator = tool({ + description: "Add two numbers", + inputSchema: z.object({ + a: z.number(), + b: z.number(), + }), + execute: add, +}); + +describe("calculator tool", () => { + it("adds two numbers", async () => { + const result = await add({ a: 2, b: 3 }); + expect(result).toEqual({ result: 5 }); + }); +}); +``` + +### 5. Test flow agent steps + +Flow agents have typed input/output schemas. Test the full pipeline or individual steps by checking `result.ok`, `result.output`, and `result.trace`. 
+ +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; +import { describe, it, expect } from "vitest"; + +const pipeline = flowAgent( + { + name: "text-stats", + input: z.object({ text: z.string() }), + output: z.object({ wordCount: z.number(), charCount: z.number() }), + }, + async ({ input, $ }) => { + const stats = await $.step({ + id: "compute-stats", + execute: async () => ({ + wordCount: input.text.split(/\s+/).filter(Boolean).length, + charCount: input.text.length, + }), + }); + + if (!stats.ok) throw new Error(stats.error.message); + + return stats.value; + }, +); + +describe("text-stats flow agent", () => { + it("computes word and character counts", async () => { + const result = await pipeline.generate({ input: { text: "hello world" } }); + + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.output).toEqual({ wordCount: 2, charCount: 11 }); + expect(result.trace.length).toBeGreaterThan(0); + expect(result.duration).toBeGreaterThanOrEqual(0); + } + }); + + it("rejects invalid input", async () => { + // @ts-expect-error intentionally passing wrong type + const result = await pipeline.generate({ input: { text: 42 } }); + expect(result.ok).toBe(false); + }); +}); +``` + +### 6. Test error paths + +Verify that failing steps produce `ok: false` with meaningful error codes. + +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; +import { describe, it, expect } from "vitest"; + +const failingFlowAgent = flowAgent( + { + name: "failing", + input: z.object({ shouldFail: z.boolean() }), + output: z.object({ status: z.string() }), + }, + async ({ input, $ }) => { + const result = await $.step({ + id: "maybe-fail", + execute: async () => { + if (input.shouldFail) throw new Error("Intentional failure"); + return "success"; + }, + }); + + return { status: result.ok ? 
result.value : "failed" }; + }, +); + +describe("error paths", () => { + it("handles step failure gracefully", async () => { + const result = await failingFlowAgent.generate({ input: { shouldFail: true } }); + + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.output.status).toBe("failed"); + } + }); + + it("succeeds on happy path", async () => { + const result = await failingFlowAgent.generate({ input: { shouldFail: false } }); + + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.output.status).toBe("success"); + } + }); +}); +``` + +### 7. Assert on token usage + +Verify that `result.usage` contains expected token counts after generation. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { describe, it, expect } from "vitest"; + +const helper = agent({ + name: "helper", + model: openai("gpt-4.1"), + system: "Reply with one word.", +}); + +describe("usage tracking", () => { + it("reports token usage on successful generation", async () => { + const result = await helper.generate({ prompt: "Say hello" }); + + if (result.ok) { + expect(result.usage.inputTokens).toBeGreaterThan(0); + expect(result.usage.outputTokens).toBeGreaterThan(0); + expect(result.usage.totalTokens).toBe(result.usage.inputTokens + result.usage.outputTokens); + } + }); +}); +``` + +### 8. Use hooks for test observability + +Capture lifecycle events with hooks to verify execution order and timing. 
+ +```ts +import { flowAgent } from "@funkai/agents"; +import { z } from "zod"; +import { describe, it, expect } from "vitest"; + +describe("flow agent hooks", () => { + it("fires hooks in correct order", async () => { + const events: string[] = []; + + const traced = flowAgent( + { + name: "traced", + input: z.object({ value: z.string() }), + output: z.object({ result: z.string() }), + onStart: () => { + events.push("flow:start"); + }, + onStepStart: ({ step }) => { + events.push(`step:start:${step.id}`); + }, + onStepFinish: ({ step }) => { + events.push(`step:finish:${step.id}`); + }, + onFinish: () => { + events.push("flow:finish"); + }, + }, + async ({ input, $ }) => { + await $.step({ + id: "process", + execute: async () => input.value.toUpperCase(), + }); + return { result: input.value.toUpperCase() }; + }, + ); + + await traced.generate({ input: { value: "test" } }); + + expect(events).toEqual([ + "flow:start", + "step:start:process", + "step:finish:process", + "flow:finish", + ]); + }); +}); +``` + +## Verification + +- All tests pass: `pnpm test --filter=@funkai/agents` +- `result.ok` is checked before accessing success fields +- Error paths return `ok: false` with `error.code` and `error.message` +- Hook events fire in the documented order + +## Troubleshooting + +### Tests hang indefinitely + +**Issue:** Agent tests wait for a real model response that never arrives. + +**Fix:** Use a fast model (e.g. `openai/gpt-4.1-nano`) or set a timeout on the test. Pass an `AbortSignal` with a deadline. + +### Input validation errors in tests + +**Issue:** Test input does not match the Zod schema. + +**Fix:** Ensure test data satisfies all required fields and types in the agent's `input` schema. + +### Hook assertions fail due to ordering + +**Issue:** Hook events arrive in an unexpected order. + +**Fix:** Hooks fire in a deterministic order: base hooks first, then per-call hooks. 
+ +## References + +- [`agent()` reference](/reference/agents/agent) +- [`flowAgent()` reference](/reference/agents/flow-agent) diff --git a/packages/agents/docs/tools.md b/packages/agents/docs/tools.md new file mode 100644 index 0000000..d2df0d8 --- /dev/null +++ b/packages/agents/docs/tools.md @@ -0,0 +1,174 @@ +# Tools + +`tool()` creates a tool for AI agent function calling. It wraps the AI SDK's `tool()` helper, converting Zod schemas to JSON Schema via `zodSchema()` for model I/O validation. + +## Define a tool + +Provide a `description`, an `inputSchema`, and an `execute` function. The `execute` function receives the validated input directly — not wrapped in an object. + +```ts +import { tool } from "@funkai/agents"; +import { z } from "zod"; + +const fetchPage = tool({ + description: "Fetch a web page by URL", + inputSchema: z.object({ url: z.url() }), + execute: async ({ url }) => { + const res = await fetch(url); + return { url, status: res.status, body: await res.text() }; + }, +}); +``` + +## Register a tool on an agent + +Pass tools as a record on the agent config. The tool's **name comes from the object key**, not from the tool definition. The model sees the key name and uses the `description` to decide when to call it. + +```ts +import { agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; + +const researcher = agent({ + name: "researcher", + model: openai("gpt-4.1"), + system: "You research topics by fetching web pages.", + tools: { fetchPage }, +}); +``` + +## Add output validation + +Use `outputSchema` to validate the tool's return value before it is sent back to the model. 
+ +```ts +const calculator = tool({ + description: "Evaluate a math expression", + inputSchema: z.object({ expression: z.string() }), + outputSchema: z.object({ result: z.number() }), + execute: async ({ expression }) => { + const result = eval(expression); // simplified example + return { result }; + }, +}); +``` + +## Add input examples + +Use `inputExamples` to help the model understand expected input structure. Natively supported by Anthropic; for other providers, examples can be injected into the description via middleware. + +```ts +const searchTool = tool({ + description: "Search the codebase", + inputSchema: z.object({ + query: z.string().describe("Search query"), + maxResults: z.number().default(10), + }), + inputExamples: [ + { input: { query: "authentication middleware", maxResults: 5 } }, + { input: { query: "database connection pool", maxResults: 10 } }, + ], + execute: async ({ query, maxResults }) => { + return await codeSearch(query, maxResults); + }, +}); +``` + +## Destructure input + +Since `execute` receives the validated input directly, destructure in the function signature for cleaner code. 
+ +```ts +const createFile = tool({ + description: "Create a file with the given content", + inputSchema: z.object({ + path: z.string(), + content: z.string(), + }), + execute: async ({ path, content }) => { + await fs.writeFile(path, content); + return { created: path }; + }, +}); +``` + +## Full example + +```ts +import { tool, agent } from "@funkai/agents"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const fetchPage = tool({ + description: "Fetch the contents of a web page by URL", + inputSchema: z.object({ + url: z.url(), + }), + execute: async ({ url }) => { + const res = await fetch(url); + return { + url, + status: res.status, + body: await res.text(), + }; + }, +}); + +// Tool name ("fetchPage") comes from the object key +const assistant = agent({ + name: "assistant", + model: openai("gpt-4.1"), + system: "You are a helpful assistant that can fetch web pages.", + tools: { fetchPage }, +}); +``` + +--- + +## Reference: `tool()` signature + +```ts +function tool(config: ToolConfig): Tool; +``` + +## Reference: ToolConfig + +| Field | Required | Type | Description | +| --------------- | -------- | ------------------------------------- | ------------------------------------------ | +| `description` | Yes | `string` | What the tool does (shown to the model) | +| `inputSchema` | Yes | `ZodType` | Zod schema for validating and typing input | +| `execute` | Yes | `(input: TInput) => Promise` | Execute the tool with validated input | +| `outputSchema` | No | `ZodType` | Zod schema for validating output | +| `title` | No | `string` | Display title for UIs and logs | +| `inputExamples` | No | `Array<{ input: TInput }>` | Example inputs to guide the model | + +There is no `name` field on `ToolConfig`. Tool names come from the object key when passed to an agent's `tools` record. 
+ +## Reference: Tool type + +```ts +type Tool = ReturnType>; +``` + +--- + +## Troubleshooting + +### Tool not being called + +Improve the `description` so the model understands when to use it. Add `.describe()` calls to individual schema fields to guide generation. + +### Input validation errors + +Ensure the `inputSchema` matches what the model is likely to produce. + +### Tool name mismatch + +Tool names come from the object key in `tools: { myName: myTool }`, not from the tool definition itself. + +--- + +## See also + +- [Create an Agent](create-agent.md) +- [Create a Flow Agent](create-flow-agent.md) +- [Troubleshooting](troubleshooting.md) diff --git a/packages/agents/docs/troubleshooting.md b/packages/agents/docs/troubleshooting.md index 621385e..835b315 100644 --- a/packages/agents/docs/troubleshooting.md +++ b/packages/agents/docs/troubleshooting.md @@ -38,7 +38,7 @@ By design. Hook errors are caught and never propagate. Handle errors inside the ## Abort signal propagation -Signals propagate through the entire execution tree: agents, workflows, subagents, and `$.all`/`$.race` entries. +Signals propagate through the entire execution tree: agents, flow agents, subagents, and `$.all`/`$.race` entries. ## Tool not being called by agent @@ -48,13 +48,12 @@ Signals propagate through the entire execution tree: agents, workflows, subagent **Fix:** Always check `.ok` before accessing success fields. Use `result.error.code` on failure. -## Workflow output validation failed +## Flow agent output validation failed **Fix:** Ensure the handler returns an object matching the `output` Zod schema exactly. 
## References -- [Agent](core/agent.md) -- [Workflow](core/workflow.md) -- [Provider Overview](provider/overview.md) -- [Create an Agent](guides/create-agent.md) +- [Create an Agent](create-agent.md) +- [Create a Flow Agent](create-flow-agent.md) +- [Overview](overview.md) diff --git a/packages/agents/package.json b/packages/agents/package.json index bb5c7e3..90422d5 100644 --- a/packages/agents/package.json +++ b/packages/agents/package.json @@ -39,7 +39,7 @@ }, "dependencies": { "@funkai/models": "workspace:*", - "ai": "^6.0.116", + "ai": "^6.0.136", "es-toolkit": "catalog:", "ts-pattern": "catalog:", "type-fest": "^5.5.0", diff --git a/packages/cli/README.md b/packages/cli/README.md index 611f79b..e498b1d 100644 --- a/packages/cli/README.md +++ b/packages/cli/README.md @@ -27,27 +27,27 @@ npm install @funkai/cli Generate typed TypeScript modules from `.prompt` files. ```bash -funkai prompts generate --out .prompts/client --roots src/agents +funkai prompts generate --out .prompts/client --includes "src/agents/**" ``` -| Flag | Description | -| ------------ | --------------------------------------------------- | -| `--out` | Output directory for generated files | -| `--roots` | Directories to scan recursively for `.prompt` files | -| `--partials` | Custom partials directory | -| `--silent` | Suppress output except errors | +| Flag | Description | +| ------------ | ----------------------------------------- | +| `--out` | Output directory for generated files | +| `--includes` | Glob patterns to scan for `.prompt` files | +| `--partials` | Custom partials directory | +| `--silent` | Suppress output except errors | ### `funkai prompts lint` Validate `.prompt` files without generating output. 
```bash -funkai prompts lint --roots src/agents +funkai prompts lint --includes "src/agents/**" ``` | Flag | Description | | ------------ | -------------------------------------------------------- | -| `--roots` | Directories to scan | +| `--includes` | Glob patterns to scan for `.prompt` files | | `--partials` | Custom partials directory (default: `.prompts/partials`) | | `--silent` | Suppress output except errors | @@ -78,8 +78,8 @@ Configures VSCode file associations, Liquid extension recommendation, `.gitignor ## Documentation -See the [Prompts SDK docs](../prompts/docs/overview.md) for the full file format, library API, and guides. +See the [Prompts concept](/concepts/prompts) and [Prompts CLI reference](/reference/prompts/cli) for the full file format, library API, and guides. ## License -[MIT](../../LICENSE) +[MIT](https://github.com/joggrdocs/funkai/blob/main/LICENSE) diff --git a/packages/cli/package.json b/packages/cli/package.json index 543f277..f7d25f8 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -39,10 +39,10 @@ "@funkai/prompts": "workspace:*", "@kidd-cli/core": "^0.10.0", "es-toolkit": "catalog:", - "liquidjs": "^10.25.0", + "liquidjs": "^10.25.1", "picomatch": "^4.0.3", "ts-pattern": "catalog:", - "yaml": "^2.8.2", + "yaml": "^2.8.3", "zod": "catalog:" }, "devDependencies": { diff --git a/packages/models/README.md b/packages/models/README.md index 6d025ff..b68d7f8 100644 --- a/packages/models/README.md +++ b/packages/models/README.md @@ -1,95 +1,120 @@ -# @funkai/models +
+

@funkai/models

+

Model catalog, provider resolution, and cost calculations for the funkai AI SDK.

-Model catalog, provider resolution, and cost calculations for the funkai AI SDK. +npm version +License -## Quick Start +
+ +## Features + +- :books: **300+ model catalog** — Unified catalog across 20+ providers, sourced from models.dev. +- :dart: **Type-safe model IDs** — Autocomplete for all cataloged models while accepting arbitrary strings. +- :electric_plug: **Provider resolution** — Map `"provider/model"` strings to AI SDK `LanguageModel` instances. +- :moneybag: **Cost calculation** — Calculate USD cost from token usage and per-token pricing. +- :package: **Subpath imports** — Per-provider imports for filtered model lists and typed IDs with zero-bundle overhead. + +## Install + +```bash +npm install @funkai/models +``` + +## Usage + +### Look up a model ```ts -import { model, models, createModelResolver, calculateCost, openrouter } from "@funkai/models"; +import { model } from "@funkai/models"; + +const gpt = model("gpt-4.1"); + +if (gpt) { + console.log(gpt.name); // "GPT-4.1" + console.log(gpt.contextWindow); // 1047576 + console.log(gpt.pricing.input); // cost per input token in USD + console.log(gpt.capabilities); // { reasoning, toolCall, ... 
} +} +``` + +### Filter models -const gpt = model("openai/gpt-4.1"); +```ts +import { models } from "@funkai/models"; const reasoning = models((m) => m.capabilities.reasoning); +const vision = models((m) => m.modalities.input.includes("image")); +const cheap = models((m) => m.capabilities.toolCall).toSorted( + (a, b) => a.pricing.input - b.pricing.input, +); +``` + +### Resolve providers -const resolve = createModelResolver({ - fallback: openrouter, +```ts +import { createProviderRegistry } from "@funkai/models"; +import { createOpenAI } from "@ai-sdk/openai"; +import { anthropic } from "@ai-sdk/anthropic"; + +const registry = createProviderRegistry({ + providers: { + openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), + anthropic, + }, }); -const lm = resolve("openai/gpt-4.1"); + +// Returns a LanguageModel — pass directly to agent() +const lm = registry("openai/gpt-4.1"); ``` -## API Reference +### Calculate costs -### Catalog +```ts +import { model, calculateCost } from "@funkai/models"; + +const m = model("gpt-4.1"); +if (m) { + const cost = calculateCost( + { + inputTokens: 1000, + outputTokens: 500, + totalTokens: 1500, + cacheReadTokens: 0, + cacheWriteTokens: 0, + reasoningTokens: 0, + }, + m.pricing, + ); + console.log(`Total: $${cost.total.toFixed(6)}`); +} +``` -| Export | Type | Description | -| -------- | ---------- | ------------------------------------------------ | -| `model` | `function` | Look up a single model definition by ID | -| `models` | `function` | Return all models, optionally filtered | -| `MODELS` | `const` | Complete readonly array of all model definitions | +## API -### Provider Resolution +| Export | Description | +| ------------------------ | ------------------------------------------------------ | +| `model(id)` | Look up a single model definition by ID | +| `models(filter?)` | Return all models, optionally filtered by a predicate | +| `MODELS` | Complete readonly array of all model definitions | +| 
`createProviderRegistry` | Create a registry that resolves model IDs to providers | +| `calculateCost` | Calculate USD cost from token usage and pricing | -| Export | Type | Description | -| --------------------- | ---------- | ----------------------------------------------------- | -| `createModelResolver` | `function` | Create a resolver with provider mappings and fallback | -| `openrouter` | `function` | Cached OpenRouter model resolver (reads env API key) | -| `createOpenRouter` | `function` | Create a new OpenRouter provider instance | +## Subpath Exports -### Cost Calculation +Per-provider subpath imports give access to filtered model lists and typed IDs: -| Export | Type | Description | -| --------------- | ---------- | --------------------------------------------- | -| `calculateCost` | `function` | Calculate USD cost from token usage + pricing | +```ts +import { openAIModels, openAIModel } from "@funkai/models/openai"; +import { anthropicModels } from "@funkai/models/anthropic"; +``` -### Types +Available for: `openai`, `anthropic`, `google`, `google-vertex`, `mistral`, `amazon-bedrock`, `groq`, `deepseek`, `xai`, `cohere`, `fireworks-ai`, `togetherai`, `deepinfra`, `cerebras`, `perplexity`, `openrouter`, `llama`, `alibaba`, `nvidia`, `huggingface`, `inception`. -| Export | Kind | Description | -| --------------------- | ------ | ------------------------------------------------- | -| `ModelDefinition` | `type` | Full model metadata with pricing and capabilities | -| `ModelId` | `type` | Model identifier with autocomplete support | -| `KnownModelId` | `type` | Union of all cataloged model IDs | -| `ModelPricing` | `type` | Per-token pricing rates in USD | -| `ModelCapabilities` | `type` | Boolean capability flags (reasoning, tools, etc.) 
| -| `ModelModalities` | `type` | Input/output modality descriptors | -| `ModelResolver` | `type` | Function that resolves model ID to LanguageModel | -| `ModelResolverConfig` | `type` | Configuration for `createModelResolver` | -| `LanguageModel` | `type` | AI SDK language model instance (v3) | -| `TokenUsage` | `type` | Token counts from a model invocation | -| `UsageCost` | `type` | Breakdown of cost in USD | +## Documentation -## Subpath Exports +For comprehensive documentation, see the [Models concept](/concepts/models) and [`model()` reference](/reference/models/model). + +## License -Provider-specific subpath exports give access to filtered model lists and typed IDs: - -| Import Path | Exports | -| ------------------------------- | --------------------------------------------------- | -| `@funkai/models` | Full API (catalog, provider, cost) | -| `@funkai/models/openai` | `openAIModels`, `openAIModel()`, `OpenAIModelId` | -| `@funkai/models/anthropic` | `anthropicModels`, `anthropicModel()`, etc. | -| `@funkai/models/google` | `googleModels`, `googleModel()`, etc. | -| `@funkai/models/google-vertex` | `googleVertexModels`, `googleVertexModel()`, etc. | -| `@funkai/models/mistral` | `mistralModels`, `mistralModel()`, etc. | -| `@funkai/models/amazon-bedrock` | `amazonBedrockModels`, `amazonBedrockModel()`, etc. | -| `@funkai/models/groq` | `groqModels`, `groqModel()`, etc. | -| `@funkai/models/deepseek` | `deepseekModels`, `deepseekModel()`, etc. | -| `@funkai/models/xai` | `xaiModels`, `xaiModel()`, etc. | -| `@funkai/models/cohere` | `cohereModels`, `cohereModel()`, etc. | -| `@funkai/models/fireworks-ai` | `fireworksAIModels`, `fireworksAIModel()`, etc. | -| `@funkai/models/togetherai` | `togetheraiModels`, `togetheraiModel()`, etc. | -| `@funkai/models/deepinfra` | `deepinfraModels`, `deepinfraModel()`, etc. | -| `@funkai/models/cerebras` | `cerebrasModels`, `cerebrasModel()`, etc. 
| -| `@funkai/models/perplexity` | `perplexityModels`, `perplexityModel()`, etc. | -| `@funkai/models/openrouter` | `openrouterModels`, `openrouterModel()`, etc. | -| `@funkai/models/llama` | `llamaModels`, `llamaModel()`, etc. | -| `@funkai/models/alibaba` | `alibabaModels`, `alibabaModel()`, etc. | -| `@funkai/models/nvidia` | `nvidiaModels`, `nvidiaModel()`, etc. | -| `@funkai/models/huggingface` | `huggingfaceModels`, `huggingfaceModel()`, etc. | -| `@funkai/models/inception` | `inceptionModels`, `inceptionModel()`, etc. | - -## References - -- [Overview](docs/overview.md) -- [Model Catalog](docs/catalog/overview.md) -- [Provider Resolution](docs/provider/overview.md) -- [Cost Calculation](docs/cost/overview.md) -- [Troubleshooting](docs/troubleshooting.md) +[MIT](https://github.com/joggrdocs/funkai/blob/main/LICENSE) diff --git a/packages/models/docs/catalog.md b/packages/models/docs/catalog.md new file mode 100644 index 0000000..fac7fcb --- /dev/null +++ b/packages/models/docs/catalog.md @@ -0,0 +1,302 @@ +# Model Catalog + +The model catalog is an auto-generated, readonly collection of `ModelDefinition` objects sourced from [models.dev](https://models.dev). It provides lookup functions, type-safe IDs with autocomplete, and per-provider subpath exports. 
+ +## Architecture + +```mermaid +%%{init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#313244', + 'primaryTextColor': '#cdd6f4', + 'primaryBorderColor': '#6c7086', + 'lineColor': '#89b4fa', + 'secondaryColor': '#45475a', + 'tertiaryColor': '#1e1e2e', + 'background': '#1e1e2e', + 'mainBkg': '#313244', + 'clusterBkg': '#1e1e2e', + 'clusterBorder': '#45475a' + }, + 'flowchart': { 'curve': 'basis', 'padding': 15 } +}}%% + +flowchart LR + source["models.dev API"]:::external + + subgraph generation [" "] + script["generate:models script"]:::core + providers["Per-provider .ts files"]:::core + end + + subgraph catalog [" "] + MODELS["MODELS constant"]:::core + modelFn["model(id)"]:::core + modelsFn["models(filter?)"]:::core + end + + source --> script + script --> providers + providers --> MODELS + MODELS --> modelFn + MODELS --> modelsFn + + classDef external fill:#313244,stroke:#f5c2e7,stroke-width:2px,color:#cdd6f4 + classDef core fill:#313244,stroke:#89b4fa,stroke-width:2px,color:#cdd6f4 + + style generation fill:#181825,stroke:#fab387,stroke-width:2px + style catalog fill:#181825,stroke:#89b4fa,stroke-width:2px +``` + +## ModelDefinition + +Each model has the following fields: + +| Field | Type | Description | +| --------------- | ------------------- | ---------------------------------------------- | +| `id` | `string` | Provider-native identifier (e.g. `"gpt-4.1"`) | +| `name` | `string` | Human-readable display name | +| `provider` | `string` | Provider slug (e.g. `"openai"`) | +| `family` | `string` | Model family (e.g. 
`"gpt"`, `"claude-sonnet"`) | +| `pricing` | `ModelPricing` | Per-token pricing rates in USD | +| `contextWindow` | `number` | Maximum context window in tokens | +| `maxOutput` | `number` | Maximum output tokens | +| `modalities` | `ModelModalities` | Supported input/output modalities | +| `capabilities` | `ModelCapabilities` | Boolean capability flags | + +### ModelPricing + +| Field | Type | Description | +| ------------ | --------------------- | ----------------------------------- | +| `input` | `number` | Cost per input token | +| `output` | `number` | Cost per output token | +| `cacheRead` | `number \| undefined` | Cost per cached input token (read) | +| `cacheWrite` | `number \| undefined` | Cost per cached input token (write) | + +### ModelCapabilities + +| Field | Type | Description | +| ------------------ | --------- | -------------------------------- | +| `reasoning` | `boolean` | Supports chain-of-thought | +| `toolCall` | `boolean` | Supports tool (function) calling | +| `attachment` | `boolean` | Supports file/image attachments | +| `structuredOutput` | `boolean` | Supports structured JSON output | + +### ModelModalities + +| Field | Type | Description | +| -------- | ------------------- | ---------------------------------------------------- | +| `input` | `readonly string[]` | Accepted input modalities (e.g. `"text"`, `"image"`) | +| `output` | `readonly string[]` | Produced output modalities | + +## Lookup API + +### Look Up a Single Model + +`model(id)` returns the matching `ModelDefinition` or `null`: + +```ts +import { model } from "@funkai/models"; + +const m = model("openai/gpt-4.1"); +if (m) { + console.log(m.name); + console.log(m.pricing.input); + console.log(m.capabilities.reasoning); +} +``` + +### Get All Models + +`models()` returns the full catalog. 
Pass a predicate to filter: + +```ts +import { models } from "@funkai/models"; + +const all = models(); +const withTools = models((m) => m.capabilities.toolCall); +``` + +### Access the Raw Catalog + +`MODELS` is the complete readonly array, useful when you need direct iteration: + +```ts +import { MODELS } from "@funkai/models"; + +const providers = new Set(MODELS.map((m) => m.provider)); +``` + +## ModelId Type + +`ModelId` provides autocomplete for known model IDs while accepting arbitrary strings for new or custom models: + +```ts +import type { ModelId } from "@funkai/models"; + +const id: ModelId = "openai/gpt-4.1"; +``` + +## Filtering Patterns + +`models()` accepts an optional predicate function `(m: ModelDefinition) => boolean`. When provided, only models where the predicate returns `true` are included. + +### Filter by Capability + +```ts +const reasoning = models((m) => m.capabilities.reasoning); +const withTools = models((m) => m.capabilities.toolCall); +const structured = models((m) => m.capabilities.structuredOutput); +``` + +### Filter by Provider + +```ts +const openai = models((m) => m.provider === "openai"); +const anthropic = models((m) => m.provider === "anthropic"); +``` + +### Filter by Modality + +```ts +const vision = models((m) => m.modalities.input.includes("image")); +const audio = models((m) => m.modalities.input.includes("audio")); +const multimodal = models((m) => m.modalities.input.length > 1); +``` + +### Filter by Context Window + +```ts +const largeContext = models((m) => m.contextWindow >= 128_000); +const longOutput = models((m) => m.maxOutput >= 16_000); +``` + +### Filter by Pricing + +```ts +const cheapInput = models((m) => m.pricing.input < 0.000001); +const withCache = models((m) => m.pricing.cacheRead != null); +``` + +### Filter by Family + +```ts +const gpt = models((m) => m.family === "gpt"); +const claude = models((m) => m.family.startsWith("claude")); +``` + +### Combine Multiple Conditions + +```ts +const ideal = 
models( + (m) => + m.capabilities.reasoning && + m.capabilities.toolCall && + m.contextWindow >= 128_000 && + m.pricing.input < 0.00001, +); +``` + +### Sort by Price + +```ts +const cheapest = models((m) => m.capabilities.reasoning).toSorted( + (a, b) => a.pricing.input - b.pricing.input, +); + +const pick = cheapest[0]; +``` + +### Extract Unique Values + +```ts +const providers = [...new Set(models().map((m) => m.provider))]; +const families = [...new Set(models().map((m) => m.family))]; +``` + +### Per-Provider Filtering + +Use subpath exports for provider-scoped operations: + +```ts +import { openAIModels } from "@funkai/models/openai"; + +const reasoningGpt = openAIModels.filter((m) => m.capabilities.reasoning); +``` + +## Supported Providers + +The model catalog includes models from 21 providers. Each provider has a dedicated subpath export and a prefix used in model IDs. + +| Provider | Prefix | Subpath Import | +| -------------- | ---------------- | ------------------------------- | +| OpenAI | `openai` | `@funkai/models/openai` | +| Anthropic | `anthropic` | `@funkai/models/anthropic` | +| Google | `google` | `@funkai/models/google` | +| Google Vertex | `google-vertex` | `@funkai/models/google-vertex` | +| Mistral | `mistral` | `@funkai/models/mistral` | +| Amazon Bedrock | `amazon-bedrock` | `@funkai/models/amazon-bedrock` | +| Groq | `groq` | `@funkai/models/groq` | +| DeepSeek | `deepseek` | `@funkai/models/deepseek` | +| xAI | `xai` | `@funkai/models/xai` | +| Cohere | `cohere` | `@funkai/models/cohere` | +| Fireworks AI | `fireworks-ai` | `@funkai/models/fireworks-ai` | +| Together AI | `togetherai` | `@funkai/models/togetherai` | +| DeepInfra | `deepinfra` | `@funkai/models/deepinfra` | +| Cerebras | `cerebras` | `@funkai/models/cerebras` | +| Perplexity | `perplexity` | `@funkai/models/perplexity` | +| OpenRouter | `openrouter` | `@funkai/models/openrouter` | +| Llama | `llama` | `@funkai/models/llama` | +| Alibaba | `alibaba` | 
`@funkai/models/alibaba`       |
+| NVIDIA         | `nvidia`         | `@funkai/models/nvidia`         |
+| Hugging Face   | `huggingface`    | `@funkai/models/huggingface`    |
+| Inception      | `inception`      | `@funkai/models/inception`      |
+
+## Per-Provider Subpath Exports
+
+Each provider subpath exports three members following a consistent naming pattern:
+
+| Export              | Type       | Description                                      |
+| ------------------- | ---------- | ------------------------------------------------ |
+| `<provider>Models`  | `const`    | Readonly array of `ModelDefinition` for provider |
+| `<provider>Model`   | `function` | Look up a model by ID, returns `null` if missing |
+| `<Provider>ModelId` | `type`     | Union type of known model IDs for the provider   |
+
+```ts
+import { anthropicModels, anthropicModel } from "@funkai/models/anthropic";
+import type { AnthropicModelId } from "@funkai/models/anthropic";
+
+const id: AnthropicModelId = "claude-sonnet-4-20250514";
+
+const m = anthropicModel(id);
+if (m) {
+  console.log(m.name, m.pricing.input);
+}
+
+const withReasoning = anthropicModels.filter((m) => m.capabilities.reasoning);
+```
+
+Model IDs in the catalog use the format `<model-name>` (e.g. `"gpt-4.1"`, `"claude-sonnet-4-20250514"`). When used with `createProviderRegistry()`, prefix them with the provider slug: `"openai/gpt-4.1"`, `"anthropic/claude-sonnet-4-20250514"`.
+
+## Updating the Catalog
+
+Regenerate the catalog from models.dev:
+
+```bash
+pnpm --filter=@funkai/models generate:models
+```
+
+Force-regenerate (ignoring staleness cache):
+
+```bash
+pnpm --filter=@funkai/models generate:models --force
+```
+
+This requires `OPENROUTER_API_KEY` to be set in the environment.
+
+## References
+
+- [Provider Resolution](provider-resolution.md)
+- [Cost Tracking](cost-tracking.md)
+- [Troubleshooting](troubleshooting.md)
diff --git a/packages/models/docs/catalog/providers.md b/packages/models/docs/catalog/providers.md
index 7c1c788..fc9123e 100644
--- a/packages/models/docs/catalog/providers.md
+++ b/packages/models/docs/catalog/providers.md
@@ -56,7 +56,7 @@ const withReasoning = anthropicModels.filter((m) => m.capabilities.reasoning);
 
 ## Model ID Format
 
-Model IDs in the catalog use the format `<model-name>` (e.g. `"gpt-4.1"`, `"claude-sonnet-4-20250514"`). When used with `createModelResolver()`, prefix them with the provider slug: `"openai/gpt-4.1"`, `"anthropic/claude-sonnet-4-20250514"`.
+Model IDs in the catalog use the format `<model-name>` (e.g. `"gpt-4.1"`, `"claude-sonnet-4-20250514"`). When used with `createProviderRegistry()`, prefix them with the provider slug: `"openai/gpt-4.1"`, `"anthropic/claude-sonnet-4-20250514"`.
 
 ## Data Source
 
diff --git a/packages/models/docs/cost-tracking.md b/packages/models/docs/cost-tracking.md
new file mode 100644
index 0000000..ac52bbd
--- /dev/null
+++ b/packages/models/docs/cost-tracking.md
@@ -0,0 +1,144 @@
+# Cost Tracking
+
+`calculateCost()` computes the USD cost of a model invocation by multiplying token counts against per-token pricing rates from the catalog.
+ +## calculateCost() API + +```ts +import { calculateCost, model } from "@funkai/models"; +import type { TokenUsage } from "@funkai/models"; + +const m = model("openai/gpt-4.1"); +if (m) { + const cost = calculateCost(usage, m.pricing); +} +``` + +## Types + +### TokenUsage + +Token counts from a model invocation: + +| Field | Type | Description | +| ------------------ | -------- | ------------------------------------- | +| `inputTokens` | `number` | Number of input (prompt) tokens | +| `outputTokens` | `number` | Number of output (completion) tokens | +| `totalTokens` | `number` | Total tokens (input + output) | +| `cacheReadTokens` | `number` | Tokens served from prompt cache | +| `cacheWriteTokens` | `number` | Tokens written into prompt cache | +| `reasoningTokens` | `number` | Tokens consumed by internal reasoning | + +### ModelPricing + +Per-token pricing rates from the model catalog: + +| Field | Type | Description | +| ------------ | --------------------- | --------------------------------- | +| `input` | `number` | Cost per input token (USD) | +| `output` | `number` | Cost per output token (USD) | +| `cacheRead` | `number \| undefined` | Cost per cached read token (USD) | +| `cacheWrite` | `number \| undefined` | Cost per cached write token (USD) | + +Pricing rates are stored per-token in the catalog (converted from per-million at generation time). No runtime conversion is needed. + +### UsageCost + +The output of `calculateCost()`: + +| Field | Type | Description | +| ------------ | -------- | ---------------------------- | +| `input` | `number` | Cost for input tokens | +| `output` | `number` | Cost for output tokens | +| `cacheRead` | `number` | Cost for cached read tokens | +| `cacheWrite` | `number` | Cost for cached write tokens | +| `total` | `number` | Sum of all cost fields | + +All fields are non-negative. Fields that don't apply are `0`. 
+ +## Basic Usage + +```ts +const m = model("openai/gpt-4.1"); +if (!m) { + throw new Error("Model not found in catalog"); +} + +const usage: TokenUsage = { + inputTokens: 1500, + outputTokens: 800, + totalTokens: 2300, + cacheReadTokens: 500, + cacheWriteTokens: 0, + reasoningTokens: 0, +}; + +const cost = calculateCost(usage, m.pricing); +console.log(`Total: $${cost.total.toFixed(6)}`); +``` + +## Cost Breakdown + +```ts +const cost = calculateCost(usage, m.pricing); + +console.log(`Input: $${cost.input.toFixed(6)}`); +console.log(`Output: $${cost.output.toFixed(6)}`); +console.log(`Cache read: $${cost.cacheRead.toFixed(6)}`); +console.log(`Cache write: $${cost.cacheWrite.toFixed(6)}`); +console.log(`Total: $${cost.total.toFixed(6)}`); +``` + +## Accumulating Costs Across Calls + +```ts +const totalCost = runs.reduce((sum, run) => { + const runModel = model(run.modelId); + if (!runModel) return sum; + return sum + calculateCost(run.usage, runModel.pricing).total; +}, 0); + +console.log(`Session total: $${totalCost.toFixed(6)}`); +``` + +## Comparing Model Costs + +Estimate the cost of a workload across different models: + +```ts +const usage: TokenUsage = { + inputTokens: 10_000, + outputTokens: 2_000, + totalTokens: 12_000, + cacheReadTokens: 0, + cacheWriteTokens: 0, + reasoningTokens: 0, +}; + +const candidates = models((m) => m.capabilities.reasoning); + +const costs = candidates.map((m) => ({ + id: m.id, + total: calculateCost(usage, m.pricing).total, +})); + +const sorted = costs.toSorted((a, b) => a.total - b.total); +``` + +## Calculation Formula + +```text +input = inputTokens * pricing.input +output = outputTokens * pricing.output +cacheRead = cacheReadTokens * (pricing.cacheRead ?? 0) +cacheWrite = cacheWriteTokens * (pricing.cacheWrite ?? 0) +total = input + output + cacheRead + cacheWrite +``` + +Optional pricing fields (`cacheRead`, `cacheWrite`) default to `0` when absent. 
+ +## References + +- [Model Catalog](catalog.md) +- [Provider Resolution](provider-resolution.md) +- [Troubleshooting](troubleshooting.md) diff --git a/packages/models/docs/guides/setup-resolver.md b/packages/models/docs/guides/setup-resolver.md index 025849f..d6204f5 100644 --- a/packages/models/docs/guides/setup-resolver.md +++ b/packages/models/docs/guides/setup-resolver.md @@ -1,12 +1,11 @@ -# Set Up a Model Resolver +# Set Up a Provider Registry -Configure `createModelResolver()` with multiple providers and an OpenRouter fallback. +Configure `createProviderRegistry()` with multiple providers. ## Prerequisites - `@funkai/models` installed - API keys for your providers (OpenAI, Anthropic, etc.) -- `OPENROUTER_API_KEY` set in the environment (for fallback) ## Steps @@ -18,55 +17,52 @@ Install the AI SDK providers you want to use directly: pnpm add @ai-sdk/openai @ai-sdk/anthropic ``` -### 2. Create the Resolver +### 2. Create the Registry ```ts -import { createModelResolver, openrouter } from "@funkai/models"; +import { createProviderRegistry } from "@funkai/models"; import { createOpenAI } from "@ai-sdk/openai"; import { createAnthropic } from "@ai-sdk/anthropic"; -const resolve = createModelResolver({ +const registry = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }), }, - fallback: openrouter, }); ``` ### 3. Resolve Models ```ts -const gpt = resolve("openai/gpt-4.1"); -const claude = resolve("anthropic/claude-sonnet-4"); -const mistral = resolve("mistral/mistral-large-latest"); +const gpt = registry("openai/gpt-4.1"); +const claude = registry("anthropic/claude-sonnet-4"); ``` - `"openai/gpt-4.1"` routes through `@ai-sdk/openai` directly - `"anthropic/claude-sonnet-4"` routes through `@ai-sdk/anthropic` directly -- `"mistral/mistral-large-latest"` has no mapped provider, so it routes through OpenRouter ### 4. 
Use with Agents -Pass the resolver to `@funkai/agents` by resolving the model before creating the agent: +Pass the registry to `@funkai/agents` by resolving the model before creating the agent: ```ts import { agent } from "@funkai/agents"; const summarizer = agent({ name: "summarizer", - model: resolve("openai/gpt-4.1"), + model: registry("openai/gpt-4.1"), prompt: ({ input }) => `Summarize:\n\n${input.text}`, }); ``` ## Verification -Verify the resolver works by resolving each configured provider: +Verify the registry works by resolving each configured provider: ```ts -const gpt = resolve("openai/gpt-4.1"); -const claude = resolve("anthropic/claude-sonnet-4"); +const gpt = registry("openai/gpt-4.1"); +const claude = registry("anthropic/claude-sonnet-4"); console.log(gpt.modelId); console.log(claude.modelId); @@ -76,29 +72,18 @@ console.log(claude.modelId); ### Cannot resolve model: no provider mapped -**Issue:** The model ID prefix does not match any key in `providers` and no `fallback` is configured. +**Issue:** The model ID prefix does not match any key in `providers`. -**Fix:** Add the provider to the `providers` map or configure a `fallback`: +**Fix:** Add the provider to the `providers` map: ```ts -const resolve = createModelResolver({ +const registry = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), }, - fallback: openrouter, }); ``` -### OPENROUTER_API_KEY environment variable is required - -**Issue:** Using `openrouter` as the fallback but `OPENROUTER_API_KEY` is not set. - -**Fix:** Set the environment variable: - -```bash -export OPENROUTER_API_KEY=sk-or-... 
-``` - ## References - [Provider Resolution](../provider/overview.md) diff --git a/packages/models/docs/overview.md b/packages/models/docs/overview.md index b9e1c11..36ad645 100644 --- a/packages/models/docs/overview.md +++ b/packages/models/docs/overview.md @@ -34,9 +34,8 @@ flowchart LR subgraph resolver [" "] direction TB - createResolver["createModelResolver()"]:::core + createRegistry["createProviderRegistry()"]:::core providers["Provider map"]:::gateway - fallback["Fallback (OpenRouter)"]:::gateway end subgraph cost [" "] @@ -49,11 +48,9 @@ flowchart LR ModelId --> lookup MODELS --> lookup lookup --> filter - ModelId --> createResolver - createResolver --> providers - createResolver --> fallback + ModelId --> createRegistry + createRegistry --> providers providers --> LanguageModel["LanguageModel"]:::external - fallback --> LanguageModel usage --> calcCost pricing --> calcCost calcCost --> UsageCost["UsageCost"]:::external @@ -68,72 +65,19 @@ flowchart LR style cost fill:#181825,stroke:#a6e3a1,stroke-width:2px ``` -The package has three domains: +## Three Domains -| Domain | Purpose | Key Exports | -| ------------ | -------------------------------------------- | ------------------------------------- | -| **Catalog** | Generated model metadata from models.dev | `model()`, `models()`, `MODELS` | -| **Provider** | Resolve model IDs to AI SDK `LanguageModel`s | `createModelResolver()`, `openrouter` | -| **Cost** | Calculate USD costs from token usage | `calculateCost()` | +| Domain | Purpose | Key Exports | +| ------------ | -------------------------------------------- | ------------------------------- | +| **Catalog** | Generated model metadata from models.dev | `model()`, `models()`, `MODELS` | +| **Provider** | Resolve model IDs to AI SDK `LanguageModel`s | `createProviderRegistry()` | +| **Cost** | Calculate USD costs from token usage | `calculateCost()` | -## Key Concepts +## Documentation -### Model Definitions - -Every model in the catalog is a 
`ModelDefinition` with pricing, capabilities, modalities, and context window metadata. The catalog is auto-generated from [models.dev](https://models.dev) and updated via `pnpm --filter=@funkai/models generate:models`. - -### Provider Resolution - -`createModelResolver()` maps model ID prefixes (e.g. `"openai"` from `"openai/gpt-4.1"`) to AI SDK provider factories. Unmapped prefixes fall through to an optional fallback (typically OpenRouter). - -### Cost Calculation - -`calculateCost()` multiplies token counts by per-token pricing rates. Pricing is stored per-token in the catalog (converted from per-million at generation time), so no runtime conversion is needed. - -## Usage - -### Look Up a Model - -```ts -const m = model("openai/gpt-4.1"); -if (m) { - console.log(m.name, m.contextWindow, m.capabilities.reasoning); -} -``` - -### Filter Models - -```ts -const reasoning = models((m) => m.capabilities.reasoning); -const multimodal = models((m) => m.modalities.input.includes("image")); -``` - -### Resolve a Model - -```ts -const resolve = createModelResolver({ - fallback: openrouter, -}); -const lm = resolve("openai/gpt-4.1"); -``` - -### Calculate Cost - -```ts -const cost = calculateCost(usage, m.pricing); -console.log(`Total: $${cost.total.toFixed(6)}`); -``` - -## References - -- [Model Catalog](catalog/overview.md) -- [Filtering](catalog/filtering.md) -- [Providers](catalog/providers.md) -- [Provider Resolution](provider/overview.md) -- [Configuration](provider/configuration.md) -- [OpenRouter](provider/openrouter.md) -- [Cost Calculation](cost/overview.md) -- [Setup Resolver Guide](guides/setup-resolver.md) -- [Filter Models Guide](guides/filter-models.md) -- [Track Costs Guide](guides/track-costs.md) -- [Troubleshooting](troubleshooting.md) +| Topic | Description | +| --------------------------------------------- | --------------------------------------------------------------------------- | +| [Model Catalog](catalog.md) | Model definitions, lookup API, 
filtering patterns, provider subpath exports | +| [Provider Resolution](provider-resolution.md) | Resolution algorithm, registry configuration, OpenRouter integration | +| [Cost Tracking](cost-tracking.md) | calculateCost() API, types, formula, usage patterns | +| [Troubleshooting](troubleshooting.md) | Common errors and fixes | diff --git a/packages/models/docs/provider-resolution.md b/packages/models/docs/provider-resolution.md new file mode 100644 index 0000000..a7ff2f0 --- /dev/null +++ b/packages/models/docs/provider-resolution.md @@ -0,0 +1,210 @@ +# Provider Resolution + +Provider resolution maps model ID strings to AI SDK `LanguageModel` instances. `createProviderRegistry()` extracts the provider prefix from a model ID and dispatches to the appropriate provider factory. + +## How It Works + +```mermaid +%%{init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#313244', + 'primaryTextColor': '#cdd6f4', + 'primaryBorderColor': '#6c7086', + 'lineColor': '#89b4fa', + 'secondaryColor': '#45475a', + 'tertiaryColor': '#1e1e2e', + 'actorBkg': '#313244', + 'actorBorder': '#89b4fa', + 'actorTextColor': '#cdd6f4', + 'signalColor': '#cdd6f4', + 'signalTextColor': '#cdd6f4' + } +}}%% +sequenceDiagram + participant C as Caller + participant R as ProviderRegistry + participant P as ProviderFactory + + C->>R: registry("openai/gpt-4.1") + R->>R: Extract prefix "openai" + + alt Provider mapped + R->>P: factory("gpt-4.1") + P-->>R: LanguageModel + else No match + R-->>C: Error thrown + end + + R-->>C: LanguageModel +``` + +When `registry("openai/gpt-4.1")` is called: + +1. The model ID is validated (non-empty) +2. The prefix before the first `/` is extracted (`"openai"`) +3. If a provider factory is mapped for that prefix, it receives the model portion (`"gpt-4.1"`) +4. If no provider matches, an error is thrown + +Model IDs without a `/` (e.g. `"gpt-4.1"`) have no prefix to match, so an error is thrown. Always use the full `"provider/model"` format. 
+
+## createProviderRegistry() API
+
+### ProviderRegistryConfig
+
+| Option      | Type          | Default | Description                               |
+| ----------- | ------------- | ------- | ----------------------------------------- |
+| `providers` | `ProviderMap` | `{}`    | Direct AI SDK provider mappings by prefix |
+
+A registry with no providers throws on every call.
+
+### ProviderMap
+
+`ProviderMap` is `Readonly<Record<string, ProviderFactory>>`. Keys are provider prefixes that match the portion before `/` in a model ID.
+
+```ts
+const providers: ProviderMap = {
+  openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+  anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
+};
+```
+
+### ProviderFactory
+
+`ProviderFactory` is `(modelName: string) => LanguageModel`. AI SDK provider constructors (`createOpenAI`, `createAnthropic`, etc.) return compatible factory functions.
+
+```ts
+import { createOpenAI } from "@ai-sdk/openai";
+
+const factory: ProviderFactory = createOpenAI({ apiKey: "..." });
+const lm = factory("gpt-4.1");
+```
+
+## Setting Up Providers
+
+### Install Provider SDKs
+
+Install the AI SDK providers you want to use:
+
+```bash
+pnpm add @ai-sdk/openai @ai-sdk/anthropic
+```
+
+### Basic Registry
+
+```ts
+import { createProviderRegistry } from "@funkai/models";
+import { createOpenAI } from "@ai-sdk/openai";
+
+const registry = createProviderRegistry({
+  providers: {
+    openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+  },
+});
+
+const lm = registry("openai/gpt-4.1");
+```
+
+### Multi-Provider Registry
+
+```ts
+import { createProviderRegistry } from "@funkai/models";
+import { createOpenAI } from "@ai-sdk/openai";
+import { createAnthropic } from "@ai-sdk/anthropic";
+
+const registry = createProviderRegistry({
+  providers: {
+    openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+    anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
+  },
+});
+
+const gpt = registry("openai/gpt-4.1");
+const claude = registry("anthropic/claude-sonnet-4");
+``` + +### Use with Agents + +Pass the registry to `@funkai/agents` by resolving the model before creating the agent: + +```ts +import { agent } from "@funkai/agents"; + +const summarizer = agent({ + name: "summarizer", + model: registry("openai/gpt-4.1"), + prompt: ({ input }) => `Summarize:\n\n${input.text}`, +}); +``` + +## OpenRouter Integration + +OpenRouter acts as a model aggregator, routing requests to the underlying provider. Use the `@openrouter/ai-sdk-provider` package to create an OpenRouter provider instance. + +### API Key Resolution + +`createOpenRouter` resolves the API key in this order: + +1. Explicit `apiKey` in options +2. `OPENROUTER_API_KEY` environment variable + +If neither is set, an error is thrown at call time. + +### Configuration + +| Option | Type | Default | Description | +| -------- | -------- | -------------------------------- | ------------------ | +| `apiKey` | `string` | `process.env.OPENROUTER_API_KEY` | OpenRouter API key | + +Additional options are forwarded directly to the underlying `@openrouter/ai-sdk-provider`. + +### As a Registry Provider + +```ts +import { createProviderRegistry } from "@funkai/models"; +import { createOpenAI } from "@ai-sdk/openai"; +import { createOpenRouter } from "@openrouter/ai-sdk-provider"; + +const registry = createProviderRegistry({ + providers: { + openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), + openrouter: createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY }), + }, +}); + +const lm = registry("openrouter/anthropic/claude-sonnet-4"); +``` + +Models with an `"openai"` prefix route through `@ai-sdk/openai`. Models with an `"openrouter"` prefix route through OpenRouter. 
+
+### Direct Usage
+
+Use `createOpenRouter` directly without a registry:
+
+```ts
+import { createOpenRouter } from "@openrouter/ai-sdk-provider";
+
+const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY });
+const lm = openrouter("openai/gpt-4.1");
+```
+
+### Resources
+
+- [OpenRouter Documentation](https://openrouter.ai/docs)
+- [@openrouter/ai-sdk-provider](https://www.npmjs.com/package/@openrouter/ai-sdk-provider)
+
+## Error Handling
+
+`createProviderRegistry()` throws in these cases:
+
+| Condition       | Error Message                                                    |
+| --------------- | ---------------------------------------------------------------- |
+| Empty model ID  | `Cannot resolve model: model ID is empty`                        |
+| No prefix       | `Cannot resolve model "<id>": no provider prefix`                |
+| Unmapped prefix | `Cannot resolve model "<id>": no provider mapped for "<prefix>"` |
+
+## References
+
+- [Model Catalog](catalog.md)
+- [Cost Tracking](cost-tracking.md)
+- [Troubleshooting](troubleshooting.md)
diff --git a/packages/models/docs/provider/configuration.md b/packages/models/docs/provider/configuration.md
index b53f7ae..6a620b0 100644
--- a/packages/models/docs/provider/configuration.md
+++ b/packages/models/docs/provider/configuration.md
@@ -1,17 +1,16 @@
 # Provider Configuration
 
-Configuration options for `createModelResolver()` and how to set up provider mappings.
+Configuration options for `createProviderRegistry()` and how to set up provider mappings.
 
## Key Concepts -### ModelResolverConfig +### ProviderRegistryConfig -| Option | Type | Default | Description | -| ----------- | ------------------------------------ | ----------- | ----------------------------------------- | -| `providers` | `ProviderMap` | `{}` | Direct AI SDK provider mappings by prefix | -| `fallback` | `(modelId: string) => LanguageModel` | `undefined` | Fallback factory for unmapped prefixes | +| Option | Type | Default | Description | +| ----------- | ------------- | ------- | ----------------------------------------- | +| `providers` | `ProviderMap` | `{}` | Direct AI SDK provider mappings by prefix | -Both fields are optional. A resolver with no configuration throws on every call. +A registry with no providers throws on every call. ### ProviderMap @@ -35,7 +34,7 @@ const providers: ProviderMap = { Map each provider explicitly. Unmapped prefixes throw an error: ```ts -const resolve = createModelResolver({ +const resolve = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }), @@ -44,54 +43,32 @@ const resolve = createModelResolver({ }); ``` -### Direct Providers with OpenRouter Fallback +### With OpenRouter -Map preferred providers directly. 
Unmapped prefixes route through OpenRouter: +Include OpenRouter as a provider using `@openrouter/ai-sdk-provider`: ```ts -const resolve = createModelResolver({ - providers: { - openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), - }, - fallback: openrouter, -}); -``` - -### OpenRouter-Only - -Route all models through OpenRouter: - -```ts -const resolve = createModelResolver({ - fallback: openrouter, -}); -``` - -### Custom Fallback +import { createOpenRouter } from "@openrouter/ai-sdk-provider"; -Use any function as a fallback: - -```ts -const resolve = createModelResolver({ +const registry = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), - }, - fallback: (modelId: string) => { - const provider = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY }); - return provider(modelId); + openrouter: createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY }), }, }); + +const lm = registry("openrouter/anthropic/claude-sonnet-4"); ``` ## Error Handling -`createModelResolver()` throws in these cases: +`createProviderRegistry()` throws in these cases: -| Condition | Error Message | -| ---------------------------- | ------------------------------------------------------------------------------------------- | -| Empty model ID | `Cannot resolve model: model ID is empty` | -| No prefix, no fallback | `Cannot resolve model "": no provider prefix and no fallback configured` | -| Unmapped prefix, no fallback | `Cannot resolve model "": no provider mapped for "" and no fallback configured` | +| Condition | Error Message | +| --------------- | ---------------------------------------------------------------- | +| Empty model ID | `Cannot resolve model: model ID is empty` | +| No prefix | `Cannot resolve model "": no provider prefix` | +| Unmapped prefix | `Cannot resolve model "": no provider mapped for ""` | ## References diff --git a/packages/models/docs/provider/openrouter.md 
b/packages/models/docs/provider/openrouter.md index 80e8e5b..19fa1ba 100644 --- a/packages/models/docs/provider/openrouter.md +++ b/packages/models/docs/provider/openrouter.md @@ -1,83 +1,64 @@ # OpenRouter Integration -OpenRouter acts as a model aggregator, routing requests to the underlying provider. `@funkai/models` provides two exports for OpenRouter integration: `openrouter` (cached singleton) and `createOpenRouter` (factory). +OpenRouter acts as a model aggregator, routing requests to the underlying provider. Use the `@openrouter/ai-sdk-provider` package directly to create an OpenRouter provider instance. ## Key Concepts ### API Key Resolution -Both `openrouter` and `createOpenRouter` resolve the API key in this order: +`createOpenRouter` from `@openrouter/ai-sdk-provider` resolves the API key in this order: -1. Explicit `apiKey` in options (for `createOpenRouter`) +1. Explicit `apiKey` in options 2. `OPENROUTER_API_KEY` environment variable If neither is set, an error is thrown at call time. -### Cached Provider - -The `openrouter` export is a cached resolver. The underlying provider instance is created once and reused across calls. If `OPENROUTER_API_KEY` changes at runtime, the cache invalidates and a new provider is created. - -```ts -const lm = openrouter("openai/gpt-4.1"); -``` - ### Provider Factory -`createOpenRouter` creates a new `OpenRouterProvider` instance. Use this when you need multiple providers with different configurations: +`createOpenRouter` from `@openrouter/ai-sdk-provider` creates a provider instance: ```ts -const provider = createOpenRouter({ apiKey: "sk-..." 
}); -const lm = provider("openai/gpt-4.1"); +import { createOpenRouter } from "@openrouter/ai-sdk-provider"; + +const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY }); +const lm = openrouter("openai/gpt-4.1"); ``` ## Usage -### As a Fallback +### As a Provider in the Registry -The most common pattern is using `openrouter` as the fallback for `createModelResolver()`: +The most common pattern is registering OpenRouter as a provider in `createProviderRegistry()`: ```ts -const resolve = createModelResolver({ +import { createProviderRegistry } from "@funkai/models"; +import { createOpenAI } from "@ai-sdk/openai"; +import { createOpenRouter } from "@openrouter/ai-sdk-provider"; + +const registry = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), + openrouter: createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY }), }, - fallback: openrouter, }); ``` -Models with an `"openai"` prefix route directly. All other prefixes route through OpenRouter. - -### As the Only Provider - -```ts -const resolve = createModelResolver({ - fallback: openrouter, -}); - -const lm = resolve("anthropic/claude-sonnet-4"); -``` +Models with an `"openai"` prefix route through `@ai-sdk/openai`. Models with an `"openrouter"` prefix route through OpenRouter. 
### Direct Usage -Use `openrouter` directly without a resolver: +Use `createOpenRouter` directly without a registry: ```ts -const lm = openrouter("openai/gpt-4.1"); -``` +import { createOpenRouter } from "@openrouter/ai-sdk-provider"; -### Custom Instance - -```ts -const provider = createOpenRouter({ - apiKey: process.env.OPENROUTER_API_KEY, -}); - -const lm = provider("mistral/mistral-large-latest"); +const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY }); +const lm = openrouter("openai/gpt-4.1"); ``` ## Configuration -`createOpenRouter` accepts all options from `@openrouter/ai-sdk-provider`: +`createOpenRouter` from `@openrouter/ai-sdk-provider` accepts: | Option | Type | Default | Description | | -------- | -------- | -------------------------------- | ------------------ | diff --git a/packages/models/docs/provider/overview.md b/packages/models/docs/provider/overview.md index 89a72eb..a944d21 100644 --- a/packages/models/docs/provider/overview.md +++ b/packages/models/docs/provider/overview.md @@ -1,6 +1,6 @@ # Provider Resolution -Provider resolution maps model ID strings to AI SDK `LanguageModel` instances. `createModelResolver()` extracts the provider prefix from a model ID and dispatches to the appropriate provider factory. +Provider resolution maps model ID strings to AI SDK `LanguageModel` instances. `createProviderRegistry()` extracts the provider prefix from a model ID and dispatches to the appropriate provider factory. ## Architecture @@ -23,20 +23,16 @@ Provider resolution maps model ID strings to AI SDK `LanguageModel` instances. 
` }}%% sequenceDiagram participant C as Caller - participant R as ModelResolver + participant R as ProviderRegistry participant P as ProviderFactory - participant F as Fallback - C->>R: resolve("openai/gpt-4.1") + C->>R: registry("openai/gpt-4.1") R->>R: Extract prefix "openai" alt Provider mapped R->>P: factory("gpt-4.1") P-->>R: LanguageModel - else No match, fallback configured - R->>F: fallback("openai/gpt-4.1") - F-->>R: LanguageModel - else No match, no fallback + else No match R-->>C: Error thrown end @@ -47,17 +43,16 @@ sequenceDiagram ### Resolution Algorithm -When `resolve("openai/gpt-4.1")` is called: +When `registry("openai/gpt-4.1")` is called: 1. The model ID is validated (non-empty) 2. The prefix before the first `/` is extracted (`"openai"`) 3. If a provider factory is mapped for that prefix, it receives the model portion (`"gpt-4.1"`) -4. If no provider matches, the fallback receives the full ID (if configured) -5. If no fallback exists, an error is thrown +4. If no provider matches, an error is thrown ### Model IDs Without a Prefix -Model IDs without a `/` (e.g. `"gpt-4.1"`) skip provider lookup entirely and go directly to the fallback. If no fallback is configured, an error is thrown. +Model IDs without a `/` (e.g. `"gpt-4.1"`) have no prefix to match, so an error is thrown. Always use the full `"provider/model"` format. 
### ProviderFactory @@ -83,45 +78,36 @@ const providers: ProviderMap = { ## Usage -### Basic Resolver +### Basic Registry ```ts -const resolve = createModelResolver({ +const registry = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), }, }); -const lm = resolve("openai/gpt-4.1"); +const lm = registry("openai/gpt-4.1"); ``` -### Resolver with Fallback +### Multi-Provider Registry ```ts -const resolve = createModelResolver({ +import { createOpenAI } from "@ai-sdk/openai"; +import { createAnthropic } from "@ai-sdk/anthropic"; + +const registry = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), + anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }), }, - fallback: openrouter, -}); - -const lm1 = resolve("openai/gpt-4.1"); -const lm2 = resolve("anthropic/claude-sonnet-4"); -``` - -`lm1` routes through the direct OpenAI provider. `lm2` has no mapped provider for `"anthropic"`, so it falls through to the OpenRouter fallback. - -### Fallback-Only Resolver - -```ts -const resolve = createModelResolver({ - fallback: openrouter, }); -const lm = resolve("openai/gpt-4.1"); +const lm1 = registry("openai/gpt-4.1"); +const lm2 = registry("anthropic/claude-sonnet-4"); ``` -All models route through OpenRouter regardless of prefix. +`lm1` routes through `@ai-sdk/openai`. `lm2` routes through `@ai-sdk/anthropic`. ## References diff --git a/packages/models/docs/troubleshooting.md b/packages/models/docs/troubleshooting.md index 23acf2a..24df379 100644 --- a/packages/models/docs/troubleshooting.md +++ b/packages/models/docs/troubleshooting.md @@ -4,7 +4,7 @@ Common issues and fixes for `@funkai/models`. ## Cannot resolve model: model ID is empty -The model ID passed to the resolver is an empty string or whitespace. +The model ID passed to the registry is an empty string or whitespace. 
**Fix:** Ensure the model ID is a non-empty string: @@ -12,30 +12,27 @@ The model ID passed to the resolver is an empty string or whitespace. const lm = resolve("openai/gpt-4.1"); ``` -## Cannot resolve model: no provider prefix and no fallback configured +## Cannot resolve model: no provider prefix -A model ID without a `/` (e.g. `"gpt-4.1"`) was passed to a resolver with no fallback. +A model ID without a `/` (e.g. `"gpt-4.1"`) was passed to the registry. -**Fix:** Either use the full `"provider/model"` format or configure a fallback: +**Fix:** Use the full `"provider/model"` format: ```ts -const resolve = createModelResolver({ - fallback: openrouter, -}); +const lm = registry("openai/gpt-4.1"); ``` -## Cannot resolve model: no provider mapped for "x" and no fallback configured +## Cannot resolve model: no provider mapped for "x" -The model ID prefix does not match any key in the `providers` map and no `fallback` is configured. +The model ID prefix does not match any key in the `providers` map. 
-**Fix:** Add the provider to the `providers` map or add a fallback: +**Fix:** Add the provider to the `providers` map: ```ts -const resolve = createModelResolver({ +const registry = createProviderRegistry({ providers: { openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), }, - fallback: openrouter, }); ``` @@ -94,7 +91,6 @@ const id: ModelId = "openai/gpt-4.1"; ## References -- [Model Catalog](catalog/overview.md) -- [Provider Resolution](provider/overview.md) -- [Cost Calculation](cost/overview.md) -- [Setup Resolver Guide](guides/setup-resolver.md) +- [Model Catalog](catalog.md) +- [Provider Resolution](provider-resolution.md) +- [Cost Tracking](cost-tracking.md) diff --git a/packages/models/package.json b/packages/models/package.json index 2a35684..27cd62e 100644 --- a/packages/models/package.json +++ b/packages/models/package.json @@ -123,7 +123,7 @@ "test": "vitest run --typecheck" }, "dependencies": { - "ai": "^6.0.116", + "ai": "^6.0.136", "type-fest": "^5.5.0" }, "devDependencies": { diff --git a/packages/prompts/README.md b/packages/prompts/README.md index d8c393a..f8e9050 100644 --- a/packages/prompts/README.md +++ b/packages/prompts/README.md @@ -39,7 +39,7 @@ You are a {{ tone }} writer. ### Generate typed modules ```bash -npx funkai prompts generate --out .prompts/client --roots src/agents +npx funkai prompts generate --out .prompts/client --includes "src/agents/**" ``` ### Consume prompts @@ -94,8 +94,8 @@ Use `{% render 'name', key: 'value' %}` to include shared partials. Partials res ## Documentation -For comprehensive documentation, see [docs/overview.md](docs/overview.md). +For comprehensive documentation, see the [Prompts concept](/concepts/prompts) and [Prompts CLI reference](/reference/prompts/cli). 
## License -[MIT](../../LICENSE) +[MIT](https://github.com/joggrdocs/funkai/blob/main/LICENSE) diff --git a/packages/prompts/docs/cli.md b/packages/prompts/docs/cli.md new file mode 100644 index 0000000..328f647 --- /dev/null +++ b/packages/prompts/docs/cli.md @@ -0,0 +1,132 @@ +# CLI + +The `prompts` CLI discovers, validates, and generates typed TypeScript from `.prompt` files. + +## Installation + +Available as the `prompts` binary from `@funkai/cli`. Install it as a workspace dependency: + +```bash +pnpm add @funkai/cli --workspace +``` + +## Workflow + +```mermaid +%%{init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#313244', + 'primaryTextColor': '#cdd6f4', + 'primaryBorderColor': '#6c7086', + 'lineColor': '#89b4fa', + 'secondaryColor': '#45475a', + 'tertiaryColor': '#1e1e2e', + 'actorBkg': '#313244', + 'actorBorder': '#89b4fa', + 'actorTextColor': '#cdd6f4', + 'signalColor': '#cdd6f4', + 'signalTextColor': '#cdd6f4' + } +}}%% +sequenceDiagram + participant Dev as Developer + participant CLI as prompts CLI + participant FS as File System + + Dev->>CLI: prompts generate + CLI->>FS: Discover .prompt files from --includes globs + CLI->>CLI: Parse frontmatter + extract variables + CLI->>CLI: Lint (schema vs template match) + CLI->>CLI: Flatten partials + CLI->>FS: Write generated .ts modules + FS-->>Dev: Import typed prompts from ~prompts +``` + +## Commands Reference + +### `prompts generate` + +Generate typed TypeScript modules from `.prompt` files. 
+ +**Alias:** `gen` + +| Flag | Alias | Required | Description | +| ------------ | ----- | -------- | ----------------------------------------- | +| `--out` | `-o` | Yes | Output directory for generated files | +| `--includes` | `-r` | Yes | Glob patterns to scan for `.prompt` files | +| `--silent` | --- | No | Suppress output except errors | + +```bash +prompts generate --out .prompts/client --includes "prompts/**" "src/agents/**" "src/workflows/**" +``` + +Custom partials are auto-discovered from the sibling `partials/` directory (relative to `--out`). + +Runs lint validation automatically before generating. Exits with code 1 on lint errors. + +### `prompts lint` + +Validate `.prompt` files without generating output. + +| Flag | Alias | Required | Description | +| ------------ | ----- | -------- | -------------------------------------------------------- | +| `--includes` | `-r` | Yes | Glob patterns to scan for `.prompt` files | +| `--partials` | `-p` | No | Custom partials directory (default: `.prompts/partials`) | +| `--silent` | --- | No | Suppress output except errors | + +**Diagnostics:** + +| Level | Meaning | +| ----- | ---------------------------------------- | +| Error | Template variable not declared in schema | +| Warn | Schema variable not used in template | + +```bash +prompts lint --includes "prompts/**" "src/agents/**" +``` + +### `prompts create` + +Scaffold a new `.prompt` file. + +| Arg/Flag | Required | Description | +| ----------- | -------- | ------------------------------------------------------------- | +| `<name>` | Yes | Prompt name (kebab-case) | +| `--out` | No | Output directory (defaults to cwd) | +| `--partial` | No | Create as a partial in `.prompts/partials/` (ignores `--out`) | + +```bash +prompts create coverage-assessor --out src/agents/coverage-assessor +prompts create summary --partial +``` + +### `prompts setup` + +Interactive project configuration for `.prompt` development. No flags -- fully interactive. + +Configures: + +1. 
VSCode file association (`*.prompt` -> Markdown) +2. VSCode Liquid extension recommendation +3. `.gitignore` entry for generated `.prompts/client/` directory +4. `tsconfig.json` path alias (`~prompts` -> `./.prompts/client/index.ts`) + +## Integration + +Add a generate script to your `package.json`: + +```json +{ + "scripts": { + "prompts:generate": "prompts generate --out .prompts/client --includes \"prompts/**\" \"src/agents/**\"" + } +} +``` + +## References + +- [File Format](file-format.md) +- [Code Generation & Library](codegen.md) +- [Setup](setup.md) +- [Troubleshooting](troubleshooting.md) diff --git a/packages/prompts/docs/cli/commands.md b/packages/prompts/docs/cli/commands.md index 03e3e76..2165112 100644 --- a/packages/prompts/docs/cli/commands.md +++ b/packages/prompts/docs/cli/commands.md @@ -6,14 +6,14 @@ Generate typed TypeScript modules from `.prompt` files. **Alias:** `gen` -| Flag | Alias | Required | Description | -| ---------- | ----- | -------- | ------------------------------------------------------- | -| `--out` | `-o` | Yes | Output directory for generated files | -| `--roots` | `-r` | Yes | Space-separated directories to scan for `.prompt` files | -| `--silent` | --- | No | Suppress output except errors | +| Flag | Alias | Required | Description | +| ------------ | ----- | -------- | ----------------------------------------- | +| `--out` | `-o` | Yes | Output directory for generated files | +| `--includes` | `-r` | Yes | Glob patterns to scan for `.prompt` files | +| `--silent` | --- | No | Suppress output except errors | ```bash -prompts generate --out .prompts/client --roots prompts src/agents src/workflows +prompts generate --out .prompts/client --includes "prompts/**" "src/agents/**" "src/workflows/**" ``` Custom partials are auto-discovered from the sibling `partials/` directory (relative to `--out`). @@ -26,7 +26,7 @@ Validate `.prompt` files without generating output. 
| Flag | Alias | Required | Description | | ------------ | ----- | -------- | -------------------------------------------------------- | -| `--roots` | `-r` | Yes | Directories to scan | +| `--includes` | `-r` | Yes | Glob patterns to scan for `.prompt` files | | `--partials` | `-p` | No | Custom partials directory (default: `.prompts/partials`) | | `--silent` | --- | No | Suppress output except errors | @@ -38,7 +38,7 @@ Validate `.prompt` files without generating output. | Warn | Schema variable not used in template | ```bash -prompts lint --roots prompts src/agents +prompts lint --includes "prompts/**" "src/agents/**" ``` ## `prompts create` diff --git a/packages/prompts/docs/cli/overview.md b/packages/prompts/docs/cli/overview.md index 5ebbce3..fa1a21a 100644 --- a/packages/prompts/docs/cli/overview.md +++ b/packages/prompts/docs/cli/overview.md @@ -61,7 +61,7 @@ Add a generate script to your `package.json`: ```json { "scripts": { - "prompts:generate": "prompts generate --out .prompts/client --roots prompts src/agents" + "prompts:generate": "prompts generate --out .prompts/client --includes \"prompts/**\" \"src/agents/**\"" } } ``` diff --git a/packages/prompts/docs/codegen.md b/packages/prompts/docs/codegen.md new file mode 100644 index 0000000..998a9cf --- /dev/null +++ b/packages/prompts/docs/codegen.md @@ -0,0 +1,142 @@ +# Code Generation & Library + +The CLI transforms `.prompt` source files into typed TypeScript modules. This doc covers the pipeline stages, generated output shape, and the runtime library API. 
+ +## Pipeline + +```mermaid +%%{init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#313244', + 'primaryTextColor': '#cdd6f4', + 'primaryBorderColor': '#6c7086', + 'lineColor': '#89b4fa', + 'secondaryColor': '#45475a', + 'tertiaryColor': '#1e1e2e', + 'background': '#1e1e2e', + 'mainBkg': '#313244', + 'clusterBkg': '#1e1e2e', + 'clusterBorder': '#45475a' + }, + 'flowchart': { 'curve': 'basis', 'padding': 15 } +}}%% +flowchart TD + classDef core fill:#313244,stroke:#89b4fa,stroke-width:2px,color:#cdd6f4 + classDef agent fill:#313244,stroke:#a6e3a1,stroke-width:2px,color:#cdd6f4 + + subgraph Per Prompt + A[discoverPrompts]:::core --> B[parseFrontmatter]:::core + B --> C[clean]:::core + C --> D[flattenPartials]:::core + D --> E[extractVariables]:::core + E --> F[lintPrompt]:::core + end + + subgraph Output + F --> G[generatePromptModule]:::agent + F --> H[generateRegistry]:::agent + end +``` + +## Pipeline Stages + +| Stage | Input | Output | Description | +| ----------------- | ---------------------------- | ---------------------------------- | ------------------------------------------------------- | +| Discover | Root directories | `DiscoveredPrompt[]` | Scans for `.prompt` files (max depth 5) | +| Parse Frontmatter | Raw file content | `{ name, group, version, schema }` | Extracts and validates YAML metadata | +| Clean | Raw content | Template string | Strips frontmatter delimiters | +| Flatten Partials | Template with `{% render %}` | Resolved template | Inlines partial content with bound params | +| Extract Variables | Template string | `string[]` | Finds `{{ var }}`, `{% if var %}`, `{% for x in var %}` | +| Lint | Schema + variables | Diagnostics | Checks schema/template variable alignment | + +## Generated Output + +### Per-Prompt Module (`.ts`) + +Each module exports a default object conforming to `PromptModule`: + +| Member | Type | Description | +| --------------------- | ------------------------ | 
------------------------------------------------ | +| `name` | `string` (const) | Prompt name from frontmatter | +| `group` | `string \| undefined` | Optional grouping key | +| `schema` | `ZodObject` | Zod schema built from frontmatter `schema` block | +| `render(variables)` | `(Variables) => string` | Validates input then renders via LiquidJS | +| `validate(variables)` | `(unknown) => Variables` | Zod parse only | + +### Registry (`index.ts`) + +Aggregates all per-prompt modules into a single entry point: + +| Export | Type | Description | +| --------- | --------------------- | ------------------------------------------------------------------------------------------------------ | +| `prompts` | `PromptRegistry<...>` | Deep-frozen const object with dot-access, nested by group. Use `typeof prompts` for type-level access. | + +## Output Directory + +Generated files go to the `--out` directory (conventionally `.prompts/client/`). This subdirectory should be gitignored. The parent `.prompts/` directory also holds `partials/` for custom partials (committed to git). Import generated code via the `~prompts` tsconfig alias. + +## Runtime Library API + +The library surface provides the runtime engine and registry used by generated code and consuming packages. + +### Exports + +| Export | Type | Description | +| ---------------------- | ----------------------------------- | -------------------------------------------------------------------------- | +| `engine` | `Liquid` | Shared LiquidJS instance (no filesystem, strict filters) | +| `createEngine` | `(partialsDir, options?) 
=> Liquid` | Factory for filesystem-backed engines (used by CLI for partial resolution) | +| `clean` | `(content: string) => string` | Strips frontmatter, returns render-ready template | +| `createPromptRegistry` | `(modules) => PromptRegistry` | Creates typed registry from prompt module map | + +### Engine + +The shared `engine` instance is configured with `ownPropertyOnly: true` and `strictFilters: true` for security. No filesystem access -- templates are rendered from strings via `parseAndRenderSync`. + +`createEngine` accepts a `partialsDir` and optional overrides. It enables filesystem-backed partial resolution (`.prompt` extension, caching enabled) for use during codegen flattening. + +### Registry + +`createPromptRegistry` accepts a (possibly nested) record of `PromptModule` objects and namespace nodes. It returns a deep-frozen `PromptRegistry` with direct property access: + +```ts +const prompts = createPromptRegistry({ + agents: { coverageAssessor }, + greeting, +}); +prompts.agents.coverageAssessor.render({ scope: "full" }); +prompts.greeting.render(); +``` + +Nesting is driven by the `group` field in frontmatter. Each `/`-separated segment becomes a nesting level, with all names converted to camelCase. The registry is frozen at creation time to prevent mutation. + +## Consumer Import Pattern + +The generated `index.ts` calls `createPromptRegistry` with all prompt modules organized by group and exports a `prompts` const object. Consumers import via the `~prompts` tsconfig alias: + +```ts +import { prompts } from "~prompts"; + +// Flat (no group) +const text = prompts.greeting.render(); + +// Nested (group: agents) +const text = prompts.agents.coverageAssessor.render({ scope: "full" }); +``` + +Types are inferred from the object structure, giving full type safety on `render` and `validate` arguments at every nesting level. 
+ +## Types Reference + +| Type | Description | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| `PromptModule` | Interface: `name`, `group`, `schema` (ZodType), `render(vars)`, `validate(vars)` | +| `PromptNamespace` | A nested namespace node -- values are `PromptModule` leaves or further nested namespaces | +| `PromptRegistry` | Deep-readonly mapped type over a `PromptNamespace` tree | +| `CreateEngineOptions` | Options for `createEngine`: `root`, `partials`, `extname`, `cache`, `strictFilters`, `strictVariables`, `ownPropertyOnly` | +| `Liquid` | Re-exported LiquidJS engine type | + +## References + +- [File Format](file-format.md) +- [CLI](cli.md) diff --git a/packages/prompts/docs/file-format.md b/packages/prompts/docs/file-format.md new file mode 100644 index 0000000..f4de721 --- /dev/null +++ b/packages/prompts/docs/file-format.md @@ -0,0 +1,306 @@ +# .prompt File Format + +A `.prompt` file is a LiquidJS template with YAML frontmatter. It is a declarative prompt authoring format compiled to typed TypeScript at build time. + +## File Anatomy + +Every `.prompt` file has two sections: a YAML frontmatter block delimited by `---` fences, and a LiquidJS template body. + +```text +--- +name: coverage-assessor +group: agents/coverage-assessor +schema: + scope: + type: string + description: Assessment scope + target: + type: string + required: false +--- + +You are a coverage assessor for {{ scope }}. 
+{% if target %}Targeting {{ target }} docs.{% endif %} +``` + +| Section | Description | +| ------------------- | ------------------------------------------------------------- | +| Frontmatter (`---`) | YAML metadata block defining name, group, and variable schema | +| Body | LiquidJS template rendered at runtime with typed variables | + +## Template Syntax + +| Syntax | Purpose | +| --------------------------------------- | ----------------------------------- | +| `{{ var }}` | Variable output | +| `{{ var \| filter }}` | Filtered output | +| `{% if var %}...{% endif %}` | Conditional | +| `{% for item in list %}...{% endfor %}` | Iteration | +| `{% render 'name', key: 'value' %}` | Partial inclusion (build-time only) | + +Strict filters are enabled -- unknown filters throw an error. Variable access is restricted to own properties only. + +## Frontmatter Reference + +The YAML frontmatter block defines metadata and the variable schema. + +### Fields + +| Field | Required | Type | Description | +| --------- | -------- | -------- | ------------------------------------------------ | +| `name` | Yes | `string` | Unique kebab-case identifier (`^[a-z0-9-]+$`) | +| `group` | No | `string` | Namespace path (e.g. `agents/coverage-assessor`) | +| `version` | No | `string` | Version identifier | +| `schema` | No | `object` | Variable declarations map | + +### Validation Rules + +- `name` is required and must match `^[a-z0-9-]+$` +- Frontmatter must be valid YAML between `---` delimiters +- `schema` must be an object (not an array) +- Missing or empty `name` throws a parse error with the file path +- Non-object frontmatter (e.g. a bare string) is rejected + +## Schema Variables + +Each key under `schema` declares a template variable. Two syntaxes are supported. 
+ +**Shorthand** -- type string only, defaults to required: + +```yaml +schema: + scope: string +``` + +**Full object** -- explicit control over all fields: + +```yaml +schema: + scope: + type: string + required: true + description: Assessment scope +``` + +Shorthand `scope: string` expands to `{ type: 'string', required: true }`. + +### Variable Fields + +| Field | Default | Description | +| ------------- | -------- | ---------------------------------------------------- | +| `type` | `string` | Variable type (only `string` supported) | +| `required` | `true` | Whether the variable must be provided at render time | +| `description` | -- | Human-readable description (used in generated JSDoc) | + +## Naming and Discovery + +Names must match `^[a-z0-9-]+$` (lowercase, digits, hyphens). The `name` field in frontmatter is required and takes precedence. A file named `prompt.prompt` derives its name from the parent directory (e.g. `agents/gap-detector/prompt.prompt` becomes `gap-detector`). + +The CLI scans `--includes` glob patterns recursively (max depth 5). Files must have the `.prompt` extension. Symbolic links are skipped. Duplicate names across roots cause an error with paths listed. Results are sorted alphabetically by name. + +### Recommended File Structure + +```text +src/ + agents/ + coverage-assessor/ + prompt.prompt + prompts/ + identity.prompt + constraints.prompt +``` + +## Partials + +Partials are reusable template fragments included with `{% render %}` tags. They are resolved and flattened at build time -- the generated output contains no render tags. + +### Syntax + +```liquid +{% render 'identity', role: 'Coverage Assessor', desc: 'an expert at assessing documentation coverage' %} +``` + +Only literal string parameters are supported. Variable references (e.g. `key: myVar`) are not allowed and throw an error at codegen time. Whitespace trim variants `{%-` and `-%}` are supported. 
+ +### Resolution Order + +Partials are resolved from two locations, searched in order (first match wins): + +| Priority | Location | Description | +| -------- | -------------------- | ------------------------------------------------ | +| 1 | `.prompts/partials/` | Custom project partials (committed to git) | +| 2 | SDK `src/prompts/` | Built-in partials shipped with `@funkai/prompts` | + +Custom partials take precedence -- a custom partial with the same name as a built-in overrides it. + +### Built-in Partials + +| Partial | Parameters | Purpose | +| ------------- | -------------------------------------------------- | --------------------------------------------------- | +| `identity` | `role`, `desc`, `context` (optional) | Agent identity block (`<identity>` wrapper) | +| `constraints` | `in_scope`, `out_of_scope`, `rules` (all optional) | Scoping constraints block (`<constraints>` wrapper) | +| `tools` | `tools` (optional) | Tool listing block (`<tools>` wrapper) | + +**identity** source: + +```liquid +<identity> +You are {{ role }}, {{ desc }}. 
+{% if context %} +{{ context }} +{% endif %} +</identity> +``` + +**constraints** source: + +```liquid +<constraints> +{% if in_scope %} +## In Scope +{% for item in in_scope %} +- {{ item }} +{% endfor %} +{% endif %} +{% if out_of_scope %} +## Out of Scope +{% for item in out_of_scope %} +- {{ item }} +{% endfor %} +{% endif %} +{% if rules %} +## Rules +{% for rule in rules %} +- {{ rule }} +{% endfor %} +{% endif %} +</constraints> +``` + +### Custom Partials + +Place custom `.prompt` files in `.prompts/partials/`: + +```text +.prompts/ + client/ # Generated (gitignored) + partials/ # Custom partials (committed) + summary.prompt +``` + +The CLI auto-discovers this directory: + +- `prompts generate` derives it from `--out` (sibling `partials/` dir) +- `prompts lint` defaults to `.prompts/partials` (configurable via `--partials`) + +**Creating a custom partial:** + +```bash +prompts create summary --partial +``` + +Or create `.prompts/partials/<name>.prompt` by hand: + +```liquid +<summary> +{{ content }} +{% if notes %} +Notes: {{ notes }} +{% endif %} +</summary> +``` + +Use it in a `.prompt` file: + +```liquid +{% render 'summary', content: 'Analysis complete' %} +``` + +Run `prompts generate` -- the partial is flattened into the generated output. No `{% render %}` tags remain. + +**Overriding built-ins:** Create a file with the same name in `.prompts/partials/` (e.g. `.prompts/partials/identity.prompt`). Custom partials take precedence over SDK built-ins. + +**Adding a built-in partial (SDK contributors):** + +1. Create `packages/prompts/src/prompts/<name>.prompt` +2. Write the partial template using XML-style wrapper tags and Liquid variables +3. Test with a consumer `.prompt` file and run `prompts generate` + +## Authoring Walkthrough + +### Prerequisites + +- `@funkai/prompts` installed +- Project configured ([Setup guide](setup.md)) + +### Steps + +1. **Scaffold** with the CLI: + +```bash +prompts create my-agent --out src/agents/my-agent +``` + +2. **Edit** the frontmatter -- set `name`, `group`, and `schema` variables. 
+ +3. **Write** the template body using `{{ var }}` syntax and conditionals. + +4. **Add partials** if needed: + +```liquid +{% render 'identity', role: 'Analyzer', desc: 'a code analyzer' %} +``` + +5. **Lint:** + +```bash +prompts lint --includes "src/agents/**" +``` + +6. **Generate:** + +```bash +prompts generate --out .prompts/client --includes "src/agents/**" +``` + +7. **Import and use:** + +```ts +import { prompts } from "~prompts"; + +const text = prompts.myAgent.render({ scope: "full" }); +``` + +### Verification + +- `prompts lint` reports no errors +- Generated file exists at `.prompts/client/my-agent.ts` +- TypeScript compiles without errors + +### Troubleshooting + +#### Undefined variable error + +**Fix:** Add the variable to the frontmatter `schema` block. + +#### Duplicate prompt name + +**Fix:** Two `.prompt` files share the same `name` -- rename one to a unique kebab-case identifier. + +#### TypeScript can't find `~prompts` + +**Fix:** Run `prompts setup` or add the path alias to `tsconfig.json`. See [setup.md](setup.md). + +#### Variable reference not supported in partial + +**Fix:** Only literal string params are allowed in `{% render %}` tags. Replace variable references with string literals. + +#### Partial not found + +**Fix:** Verify the file is in `.prompts/partials/` (custom) or `src/prompts/` (built-in) with `.prompt` extension. + +## References + +- [Code Generation & Library](codegen.md) +- [CLI](cli.md) +- [Setup](setup.md) diff --git a/packages/prompts/docs/file-format/overview.md b/packages/prompts/docs/file-format/overview.md index 83ccfa0..cebf8ad 100644 --- a/packages/prompts/docs/file-format/overview.md +++ b/packages/prompts/docs/file-format/overview.md @@ -44,7 +44,7 @@ Names must match `^[a-z0-9-]+$` (lowercase, digits, hyphens). The `name` field i ## Discovery -The CLI scans `--roots` directories recursively (max depth 5). Files must have the `.prompt` extension. Symbolic links are skipped. 
Duplicate names across roots cause an error with paths listed. +The CLI scans `--includes` glob patterns recursively (max depth 5). Files must have the `.prompt` extension. Symbolic links are skipped. Duplicate names across roots cause an error with paths listed. Results are sorted alphabetically by name. diff --git a/packages/prompts/docs/guides/author-prompt.md b/packages/prompts/docs/guides/author-prompt.md index 8fd2a32..5443493 100644 --- a/packages/prompts/docs/guides/author-prompt.md +++ b/packages/prompts/docs/guides/author-prompt.md @@ -26,13 +26,13 @@ prompts create my-agent --out src/agents/my-agent 5. Lint: ```bash -prompts lint --roots src/agents +prompts lint --includes "src/agents/**" ``` 6. Generate: ```bash -prompts generate --out .prompts/client --roots src/agents +prompts generate --out .prompts/client --includes "src/agents/**" ``` 7. Import and use: diff --git a/packages/prompts/docs/guides/setup-project.md b/packages/prompts/docs/guides/setup-project.md index 1d420dc..1f9910a 100644 --- a/packages/prompts/docs/guides/setup-project.md +++ b/packages/prompts/docs/guides/setup-project.md @@ -50,7 +50,7 @@ Or configure manually (steps 3-6). ```json { "scripts": { - "prompts:generate": "prompts generate --out .prompts/client --roots prompts src/agents" + "prompts:generate": "prompts generate --out .prompts/client --includes \"prompts/**\" \"src/agents/**\"" } } ``` diff --git a/packages/prompts/docs/overview.md b/packages/prompts/docs/overview.md index 352a8fc..c174f2c 100644 --- a/packages/prompts/docs/overview.md +++ b/packages/prompts/docs/overview.md @@ -1,4 +1,4 @@ -# Prompts SDK Overview +# Prompts SDK Prompt authoring SDK with two surfaces: a **CLI** for build-time code generation from `.prompt` files, and a **library** for runtime template rendering with full type safety. 
@@ -51,28 +51,6 @@ flowchart LR classDef gateway fill:#313244,stroke:#fab387,stroke-width:2px,color:#cdd6f4 ``` -## Package Structure - -``` -📁 packages/prompts/ -├── 📁 src/ -│ ├── 📁 prompts/ # Built-in partials (identity, constraints, tools) -│ ├── 📄 engine.ts # LiquidJS engine factory -│ ├── 📄 registry.ts # Typed prompt registry -│ ├── 📄 clean.ts # Frontmatter stripping pipeline -│ ├── 📄 partials-dir.ts # PARTIALS_DIR export for CLI/consumers -│ ├── 📄 types.ts # PromptModule, PromptNamespace, PromptRegistry types -│ └── 📄 index.ts # Public exports -└── 📁 docs/ - -📁 packages/cli/ # @funkai/cli — CLI binary (see @funkai/cli README) -├── 📁 commands/ # generate, lint, create, setup -├── 📁 src/lib/ # codegen, frontmatter, flatten, lint, paths -└── 📄 index.ts # CLI entry point (kidd-cli) -``` - -> **Note:** The CLI was extracted to `@funkai/cli`. Install it separately for the `prompts` binary. - ## Dual Surface | Surface | When | What | @@ -83,20 +61,15 @@ flowchart LR ## Quick Start 1. Create a `.prompt` file with YAML frontmatter and a LiquidJS template body. -2. Run `prompts generate --out .prompts/client --roots src/agents` to produce typed modules. +2. Run `prompts generate --out .prompts/client --includes "src/agents/**"` to produce typed modules. 3. Import from the `~prompts` alias in your application code. 4. Call `.render({ vars })` with full type safety derived from the Zod schema in frontmatter. 
-## References +## Documentation -- [File Format](file-format/overview.md) -- [Frontmatter](file-format/frontmatter.md) -- [Partials](file-format/partials.md) -- [CLI](cli/overview.md) -- [CLI Commands](cli/commands.md) -- [Code Generation](codegen/overview.md) -- [Library API](library/overview.md) -- [Guide: Author a Prompt](guides/author-prompt.md) -- [Guide: Setup Project](guides/setup-project.md) -- [Guide: Add a Partial](guides/add-partial.md) -- [Troubleshooting](troubleshooting.md) +| Topic | Description | +| --------------------------------------- | ------------------------------------------------------------------------- | +| [File Format](file-format.md) | .prompt anatomy, frontmatter, schema variables, partials, authoring guide | +| [Code Generation & Library](codegen.md) | Build pipeline, generated output, runtime API, consumer patterns | +| [Project Setup](setup.md) | VSCode, .gitignore, tsconfig, package.json configuration | +| [Troubleshooting](troubleshooting.md) | Common errors and fixes | diff --git a/packages/prompts/docs/setup.md b/packages/prompts/docs/setup.md new file mode 100644 index 0000000..aa25806 --- /dev/null +++ b/packages/prompts/docs/setup.md @@ -0,0 +1,76 @@ +# Setup Prompt Development + +## Prerequisites + +- Node 24 +- pnpm workspace + +## Steps + +1. Install: + +```bash +pnpm add @funkai/prompts --workspace +``` + +2. Run interactive setup: + +```bash +prompts setup +``` + +Or configure manually (steps 3-6). + +3. Add VSCode file association in `.vscode/settings.json`: + +```json +{ + "files.associations": { + "*.prompt": "markdown" + } +} +``` + +4. Add `.prompts/client/` to `.gitignore`. + +5. Add `~prompts` path alias to `tsconfig.json`: + +```json +{ + "compilerOptions": { + "paths": { + "~prompts": ["./.prompts/client/index.ts"] + } + } +} +``` + +6. 
Add generate script to `package.json`: + +```json +{ + "scripts": { + "prompts:generate": "prompts generate --out .prompts/client --includes \"prompts/**\" \"src/agents/**\"" + } +} +``` + +## Verification + +Run `prompts generate` and verify `.prompts/client/` directory is created with an `index.ts`. + +## Troubleshooting + +### VSCode not highlighting `.prompt` files + +**Fix:** Check `.vscode/settings.json` file association is set correctly. + +### TypeScript can't resolve `~prompts` + +**Fix:** Verify `tsconfig.json` paths alias points to `./.prompts/client/index.ts`. + +## References + +- [CLI](cli.md) +- [File Format](file-format.md) +- [Code Generation & Library](codegen.md) diff --git a/packages/prompts/docs/troubleshooting.md b/packages/prompts/docs/troubleshooting.md index 2a651ef..117662d 100644 --- a/packages/prompts/docs/troubleshooting.md +++ b/packages/prompts/docs/troubleshooting.md @@ -14,7 +14,7 @@ ## Invalid prompt name -**Fix:** Names must match `^[a-z0-9-]+$` — lowercase letters, digits, and hyphens only. +**Fix:** Names must match `^[a-z0-9-]+$` -- lowercase letters, digits, and hyphens only. 
## Partial variable reference error diff --git a/packages/prompts/package.json b/packages/prompts/package.json index 0d50e9e..eeb6d6d 100644 --- a/packages/prompts/package.json +++ b/packages/prompts/package.json @@ -47,7 +47,7 @@ }, "dependencies": { "es-toolkit": "catalog:", - "liquidjs": "^10.25.0", + "liquidjs": "^10.25.1", "zod": "catalog:" }, "devDependencies": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 80bdf23..62a182d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -43,10 +43,10 @@ importers: version: 2.30.0(@types/node@25.5.0) '@vitest/coverage-v8': specifier: 'catalog:' - version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) '@zpress/kit': - specifier: ^0.2.4 - version: 0.2.4(@rspress/core@2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + specifier: ^0.2.12 + version: 0.2.12(@rspress/core@2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) eslint-plugin-functional: specifier: ^9.0.4 version: 9.0.4(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) @@ -70,13 
+70,13 @@ importers: version: 2.8.20 vitest: specifier: 'catalog:' - version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) examples/basic-agent: dependencies: '@ai-sdk/openai': - specifier: ^3.0.0 - version: 3.0.41(zod@4.3.6) + specifier: ^3.0.48 + version: 3.0.48(zod@4.3.6) '@funkai/agents': specifier: workspace:* version: link:../../packages/agents @@ -103,8 +103,8 @@ importers: examples/flow-agent: dependencies: '@ai-sdk/openai': - specifier: ^3.0.0 - version: 3.0.41(zod@4.3.6) + specifier: ^3.0.48 + version: 3.0.48(zod@4.3.6) '@funkai/agents': specifier: workspace:* version: link:../../packages/agents @@ -156,8 +156,8 @@ importers: examples/prompts-basic: dependencies: '@ai-sdk/openai': - specifier: ^3.0.0 - version: 3.0.41(zod@4.3.6) + specifier: ^3.0.48 + version: 3.0.48(zod@4.3.6) '@funkai/agents': specifier: workspace:* version: link:../../packages/agents @@ -190,8 +190,8 @@ importers: examples/prompts-subagents: dependencies: '@ai-sdk/openai': - specifier: ^3.0.0 - version: 3.0.41(zod@4.3.6) + specifier: ^3.0.48 + version: 3.0.48(zod@4.3.6) '@funkai/agents': specifier: workspace:* version: link:../../packages/agents @@ -224,8 +224,8 @@ importers: examples/realworld-cli: dependencies: '@ai-sdk/openai': - specifier: ^3.0.0 - version: 3.0.41(zod@4.3.6) + specifier: ^3.0.48 + version: 3.0.48(zod@4.3.6) '@clack/prompts': specifier: ^1.1.0 version: 1.1.0 @@ -237,13 +237,13 @@ importers: version: link:../../packages/prompts '@hono/node-server': specifier: ^1.19.11 - version: 1.19.11(hono@4.12.8) + version: 1.19.11(hono@4.12.9) dotenv: specifier: ^17.3.1 version: 17.3.1 hono: - specifier: ^4.12.8 - version: 4.12.8 + specifier: ^4.12.9 + version: 4.12.9 zod: specifier: 'catalog:' version: 4.3.6 @@ -273,8 +273,8 @@ importers: examples/streaming: dependencies: 
'@ai-sdk/openai': - specifier: ^3.0.0 - version: 3.0.41(zod@4.3.6) + specifier: ^3.0.48 + version: 3.0.48(zod@4.3.6) '@funkai/agents': specifier: workspace:* version: link:../../packages/agents @@ -304,8 +304,8 @@ importers: specifier: workspace:* version: link:../models ai: - specifier: ^6.0.116 - version: 6.0.116(zod@4.3.6) + specifier: ^6.0.136 + version: 6.0.136(zod@4.3.6) es-toolkit: specifier: 'catalog:' version: 1.45.1 @@ -327,7 +327,7 @@ importers: version: 25.5.0 '@vitest/coverage-v8': specifier: 'catalog:' - version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) tsdown: specifier: 'catalog:' version: 0.21.4(typescript@5.9.3) @@ -339,7 +339,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) packages/cli: dependencies: @@ -351,13 +351,13 @@ importers: version: link:../prompts '@kidd-cli/core': specifier: ^0.10.0 - version: 0.10.0(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 0.10.0(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) es-toolkit: specifier: 'catalog:' version: 1.45.1 liquidjs: - specifier: ^10.25.0 - version: 10.25.0 + specifier: ^10.25.1 + version: 10.25.1 picomatch: specifier: ^4.0.3 version: 4.0.3 @@ -365,15 +365,15 @@ importers: specifier: 'catalog:' version: 5.9.0 yaml: - specifier: ^2.8.2 
- version: 2.8.2 + specifier: ^2.8.3 + version: 2.8.3 zod: specifier: 'catalog:' version: 4.3.6 devDependencies: '@kidd-cli/cli': specifier: ^0.4.9 - version: 0.4.9(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(typescript@5.9.3)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 0.4.9(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(typescript@5.9.3)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) '@types/node': specifier: 'catalog:' version: 25.5.0 @@ -382,7 +382,7 @@ importers: version: 4.0.2 '@vitest/coverage-v8': specifier: 'catalog:' - version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) tsdown: specifier: 'catalog:' version: 0.21.4(typescript@5.9.3) @@ -391,7 +391,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) packages/config: dependencies: @@ -412,8 +412,8 @@ importers: packages/models: dependencies: ai: - specifier: ^6.0.116 - version: 6.0.116(zod@4.3.6) + specifier: ^6.0.136 + version: 6.0.136(zod@4.3.6) type-fest: specifier: ^5.5.0 version: 5.5.0 @@ -423,7 +423,7 @@ importers: version: 25.5.0 '@vitest/coverage-v8': specifier: 'catalog:' - version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 
4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) tsdown: specifier: 'catalog:' version: 0.21.4(typescript@5.9.3) @@ -435,7 +435,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) packages/prompts: dependencies: @@ -443,8 +443,8 @@ importers: specifier: 'catalog:' version: 1.45.1 liquidjs: - specifier: ^10.25.0 - version: 10.25.0 + specifier: ^10.25.1 + version: 10.25.1 zod: specifier: 'catalog:' version: 4.3.6 @@ -454,7 +454,7 @@ importers: version: 25.5.0 '@vitest/coverage-v8': specifier: 'catalog:' - version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + version: 4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) tsdown: specifier: 'catalog:' version: 0.21.4(typescript@5.9.3) @@ -463,7 +463,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) packages/tsconfig: {} @@ -473,20 +473,20 @@ packages: resolution: {integrity: sha512-zRF+ClRh0fcmvoKclOcmy2hmTDN48ZfHD3y1fC3Lx0vIYaX55uywssiyaA18WlV2mD+N9H4fgPxq+9JeGfMGlQ==} hasBin: true - '@ai-sdk/gateway@3.0.66': - resolution: {integrity: sha512-SIQ0YY0iMuv+07HLsZ+bB990zUJ6S4ujORAh+Jv1V2KGNn73qQKnGO0JBk+w+Res8YqOFSycwDoWcFlQrVxS4A==} + '@ai-sdk/gateway@3.0.78': + resolution: {integrity: 
sha512-wqfkgOyqWKKxGL47k8biYcm5i5ZHXjs58ZiQUroDoIcu158EpCZa2qDxdcmeLHDMzx7Pu5Ei/JUmA7OrZ5d8xA==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/openai@3.0.41': - resolution: {integrity: sha512-IZ42A+FO+vuEQCVNqlnAPYQnnUpUfdJIwn1BEDOBywiEHa23fw7PahxVtlX9zm3/zMvTW4JKPzWyvAgDu+SQ2A==} + '@ai-sdk/openai@3.0.48': + resolution: {integrity: sha512-ALmj/53EXpcRqMbGpPJPP4UOSWw0q4VGpnDo7YctvsynjkrKDmoneDG/1a7VQnSPYHnJp6tTRMf5ZdxZ5whulg==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/provider-utils@4.0.19': - resolution: {integrity: sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg==} + '@ai-sdk/provider-utils@4.0.21': + resolution: {integrity: sha512-MtFUYI1/8mgDvRmaBDjbLJPFFrMG777AvSgyIFQtZHIMzm88R/12vYBBpnk7pfiWLFE1DSZzY4WDYzGbKAcmiw==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 @@ -557,6 +557,9 @@ packages: resolution: {integrity: sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==} engines: {node: '>=18'} + '@braintree/sanitize-url@6.0.4': + resolution: {integrity: sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==} + '@changesets/apply-release-plan@7.1.0': resolution: {integrity: sha512-yq8ML3YS7koKQ/9bk1PqO0HMzApIFNwjlwCnwFEXMzNe8NpzeeYYKCmnhWJGkN8g7E51MnWaSbqRcTcdIxUgnQ==} @@ -662,11 +665,11 @@ packages: '@clerc/utils@1.3.1': resolution: {integrity: sha512-wkK6daYkmTQKnhSADMkunfDhNJI6rRCn2R++7cI2EoEBmOZWYqn7frkk5ac7zsxBi0Mc3UnMVaJiNFU+t6PPWQ==} - '@emnapi/core@1.9.0': - resolution: {integrity: sha512-0DQ98G9ZQZOxfUcQn1waV2yS8aWdZ6kJMbYCJB3oUBecjWYO1fqJ+a1DRfPF3O5JEkwqwP1A9QEN/9mYm2Yd0w==} + '@emnapi/core@1.9.1': + resolution: {integrity: sha512-mukuNALVsoix/w1BJwFzwXBN/dHeejQtuVzcDsfOEsdpCumXb/E9j8w11h5S54tT1xhifGfbbSm/ICrObRb3KA==} - '@emnapi/runtime@1.9.0': - resolution: {integrity: 
sha512-QN75eB0IH2ywSpRpNddCRfQIhmJYBCJ1x5Lb3IscKAL8bMnVAKnRg8dCoXbHzVLLH7P38N2Z3mtulB7W0J0FKw==} + '@emnapi/runtime@1.9.1': + resolution: {integrity: sha512-VYi5+ZVLhpgK4hQ0TAjiQiZ6ol0oe4mBx7mVv7IflsiEp0OWoVsp/+f9Vc1hOhE0TtkORVrI1GvzyreqpgWtkA==} '@emnapi/wasi-threads@1.2.0': resolution: {integrity: sha512-N10dEJNSsUx41Z6pZsXU8FjPjpBEplgH24sfkmITrBED1/U2Esum9F3lfLrMjKHHjmi557zQn7kR9R+XWXu5Rg==} @@ -920,8 +923,8 @@ packages: '@iconify-json/pixelarticons@1.2.4': resolution: {integrity: sha512-nADJKEI3mlxDSmJMLqGvNuvNl1TGa+VbFJIwrtj/8vUA4m15qPNiwxMyw8YxW3gVokuO9MNU462B8uK1hb4rqw==} - '@iconify-json/simple-icons@1.2.74': - resolution: {integrity: sha512-yqaohfY6jnYjTVpuTkaBQHrWbdUrQyWXhau0r/0EZiNWYXPX/P8WWwl1DoLH5CbvDjjcWQw5J0zADhgCUklOqA==} + '@iconify-json/simple-icons@1.2.75': + resolution: {integrity: sha512-KvcCUbvcBWb0sbqLIxHoY8z5/piXY08wcY9gfMhF+ph3AfzGMaSmZFkUY71HSXAljQngXkgs4bdKdekO0HQWvg==} '@iconify-json/skill-icons@1.2.4': resolution: {integrity: sha512-S6iRKHGlGCb/zfx3Isv1TBe5PunbDE5FEwJcaXji60+yRZMrcFFtYEI1xivKqaGKXyVZ508yjGDItsehQdJmSg==} @@ -995,20 +998,6 @@ packages: vitest: optional: true - '@kidd-cli/core@0.7.1': - resolution: {integrity: sha512-05jFvU1XMxdwYhwoc/zQRRYKIaIrApnxNttXw3nGC0tyxhDR3ZwxEklJxbC2D5GWnlFIOF4XHSoGKhXve5b4Ng==} - peerDependencies: - jiti: '>=2.0.0' - pino: '>=9.0.0' - vitest: '>=2.0.0' - peerDependenciesMeta: - jiti: - optional: true - pino: - optional: true - vitest: - optional: true - '@kidd-cli/utils@0.1.5': resolution: {integrity: sha512-s5lMdcz7sFcis7v6bHy1G9It3PRleRfnVh7RcCBtmzh02d73fB0s//0U2+0H4FsWxPfmkZdskCmVVOXTCn4kTg==} @@ -1968,128 +1957,128 @@ packages: '@rolldown/pluginutils@1.0.0-rc.9': resolution: {integrity: sha512-w6oiRWgEBl04QkFZgmW+jnU1EC9b57Oihi2ot3HNWIQRqgHp5PnYDia5iZ5FF7rpa4EQdiqMDXjlqKGXBhsoXw==} - '@rollup/rollup-android-arm-eabi@4.59.0': - resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} + '@rollup/rollup-android-arm-eabi@4.60.0': + 
resolution: {integrity: sha512-WOhNW9K8bR3kf4zLxbfg6Pxu2ybOUbB2AjMDHSQx86LIF4rH4Ft7vmMwNt0loO0eonglSNy4cpD3MKXXKQu0/A==} cpu: [arm] os: [android] - '@rollup/rollup-android-arm64@4.59.0': - resolution: {integrity: sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==} + '@rollup/rollup-android-arm64@4.60.0': + resolution: {integrity: sha512-u6JHLll5QKRvjciE78bQXDmqRqNs5M/3GVqZeMwvmjaNODJih/WIrJlFVEihvV0MiYFmd+ZyPr9wxOVbPAG2Iw==} cpu: [arm64] os: [android] - '@rollup/rollup-darwin-arm64@4.59.0': - resolution: {integrity: sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==} + '@rollup/rollup-darwin-arm64@4.60.0': + resolution: {integrity: sha512-qEF7CsKKzSRc20Ciu2Zw1wRrBz4g56F7r/vRwY430UPp/nt1x21Q/fpJ9N5l47WWvJlkNCPJz3QRVw008fi7yA==} cpu: [arm64] os: [darwin] - '@rollup/rollup-darwin-x64@4.59.0': - resolution: {integrity: sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==} + '@rollup/rollup-darwin-x64@4.60.0': + resolution: {integrity: sha512-WADYozJ4QCnXCH4wPB+3FuGmDPoFseVCUrANmA5LWwGmC6FL14BWC7pcq+FstOZv3baGX65tZ378uT6WG8ynTw==} cpu: [x64] os: [darwin] - '@rollup/rollup-freebsd-arm64@4.59.0': - resolution: {integrity: sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==} + '@rollup/rollup-freebsd-arm64@4.60.0': + resolution: {integrity: sha512-6b8wGHJlDrGeSE3aH5mGNHBjA0TTkxdoNHik5EkvPHCt351XnigA4pS7Wsj/Eo9Y8RBU6f35cjN9SYmCFBtzxw==} cpu: [arm64] os: [freebsd] - '@rollup/rollup-freebsd-x64@4.59.0': - resolution: {integrity: sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==} + '@rollup/rollup-freebsd-x64@4.60.0': + resolution: {integrity: sha512-h25Ga0t4jaylMB8M/JKAyrvvfxGRjnPQIR8lnCayyzEjEOx2EJIlIiMbhpWxDRKGKF8jbNH01NnN663dH638mA==} cpu: [x64] os: [freebsd] - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': - resolution: {integrity: 
sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==} + '@rollup/rollup-linux-arm-gnueabihf@4.60.0': + resolution: {integrity: sha512-RzeBwv0B3qtVBWtcuABtSuCzToo2IEAIQrcyB/b2zMvBWVbjo8bZDjACUpnaafaxhTw2W+imQbP2BD1usasK4g==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm-musleabihf@4.59.0': - resolution: {integrity: sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==} + '@rollup/rollup-linux-arm-musleabihf@4.60.0': + resolution: {integrity: sha512-Sf7zusNI2CIU1HLzuu9Tc5YGAHEZs5Lu7N1ssJG4Tkw6e0MEsN7NdjUDDfGNHy2IU+ENyWT+L2obgWiguWibWQ==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm64-gnu@4.59.0': - resolution: {integrity: sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==} + '@rollup/rollup-linux-arm64-gnu@4.60.0': + resolution: {integrity: sha512-DX2x7CMcrJzsE91q7/O02IJQ5/aLkVtYFryqCjduJhUfGKG6yJV8hxaw8pZa93lLEpPTP/ohdN4wFz7yp/ry9A==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-arm64-musl@4.59.0': - resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==} + '@rollup/rollup-linux-arm64-musl@4.60.0': + resolution: {integrity: sha512-09EL+yFVbJZlhcQfShpswwRZ0Rg+z/CsSELFCnPt3iK+iqwGsI4zht3secj5vLEs957QvFFXnzAT0FFPIxSrkQ==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-loong64-gnu@4.59.0': - resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==} + '@rollup/rollup-linux-loong64-gnu@4.60.0': + resolution: {integrity: sha512-i9IcCMPr3EXm8EQg5jnja0Zyc1iFxJjZWlb4wr7U2Wx/GrddOuEafxRdMPRYVaXjgbhvqalp6np07hN1w9kAKw==} cpu: [loong64] os: [linux] - '@rollup/rollup-linux-loong64-musl@4.59.0': - resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==} + '@rollup/rollup-linux-loong64-musl@4.60.0': + resolution: {integrity: 
sha512-DGzdJK9kyJ+B78MCkWeGnpXJ91tK/iKA6HwHxF4TAlPIY7GXEvMe8hBFRgdrR9Ly4qebR/7gfUs9y2IoaVEyog==} cpu: [loong64] os: [linux] - '@rollup/rollup-linux-ppc64-gnu@4.59.0': - resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==} + '@rollup/rollup-linux-ppc64-gnu@4.60.0': + resolution: {integrity: sha512-RwpnLsqC8qbS8z1H1AxBA1H6qknR4YpPR9w2XX0vo2Sz10miu57PkNcnHVaZkbqyw/kUWfKMI73jhmfi9BRMUQ==} cpu: [ppc64] os: [linux] - '@rollup/rollup-linux-ppc64-musl@4.59.0': - resolution: {integrity: sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==} + '@rollup/rollup-linux-ppc64-musl@4.60.0': + resolution: {integrity: sha512-Z8pPf54Ly3aqtdWC3G4rFigZgNvd+qJlOE52fmko3KST9SoGfAdSRCwyoyG05q1HrrAblLbk1/PSIV+80/pxLg==} cpu: [ppc64] os: [linux] - '@rollup/rollup-linux-riscv64-gnu@4.59.0': - resolution: {integrity: sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==} + '@rollup/rollup-linux-riscv64-gnu@4.60.0': + resolution: {integrity: sha512-3a3qQustp3COCGvnP4SvrMHnPQ9d1vzCakQVRTliaz8cIp/wULGjiGpbcqrkv0WrHTEp8bQD/B3HBjzujVWLOA==} cpu: [riscv64] os: [linux] - '@rollup/rollup-linux-riscv64-musl@4.59.0': - resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==} + '@rollup/rollup-linux-riscv64-musl@4.60.0': + resolution: {integrity: sha512-pjZDsVH/1VsghMJ2/kAaxt6dL0psT6ZexQVrijczOf+PeP2BUqTHYejk3l6TlPRydggINOeNRhvpLa0AYpCWSQ==} cpu: [riscv64] os: [linux] - '@rollup/rollup-linux-s390x-gnu@4.59.0': - resolution: {integrity: sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==} + '@rollup/rollup-linux-s390x-gnu@4.60.0': + resolution: {integrity: sha512-3ObQs0BhvPgiUVZrN7gqCSvmFuMWvWvsjG5ayJ3Lraqv+2KhOsp+pUbigqbeWqueGIsnn+09HBw27rJ+gYK4VQ==} cpu: [s390x] os: [linux] - '@rollup/rollup-linux-x64-gnu@4.59.0': - resolution: {integrity: 
sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} + '@rollup/rollup-linux-x64-gnu@4.60.0': + resolution: {integrity: sha512-EtylprDtQPdS5rXvAayrNDYoJhIz1/vzN2fEubo3yLE7tfAw+948dO0g4M0vkTVFhKojnF+n6C8bDNe+gDRdTg==} cpu: [x64] os: [linux] - '@rollup/rollup-linux-x64-musl@4.59.0': - resolution: {integrity: sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==} + '@rollup/rollup-linux-x64-musl@4.60.0': + resolution: {integrity: sha512-k09oiRCi/bHU9UVFqD17r3eJR9bn03TyKraCrlz5ULFJGdJGi7VOmm9jl44vOJvRJ6P7WuBi/s2A97LxxHGIdw==} cpu: [x64] os: [linux] - '@rollup/rollup-openbsd-x64@4.59.0': - resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==} + '@rollup/rollup-openbsd-x64@4.60.0': + resolution: {integrity: sha512-1o/0/pIhozoSaDJoDcec+IVLbnRtQmHwPV730+AOD29lHEEo4F5BEUB24H0OBdhbBBDwIOSuf7vgg0Ywxdfiiw==} cpu: [x64] os: [openbsd] - '@rollup/rollup-openharmony-arm64@4.59.0': - resolution: {integrity: sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==} + '@rollup/rollup-openharmony-arm64@4.60.0': + resolution: {integrity: sha512-pESDkos/PDzYwtyzB5p/UoNU/8fJo68vcXM9ZW2V0kjYayj1KaaUfi1NmTUTUpMn4UhU4gTuK8gIaFO4UGuMbA==} cpu: [arm64] os: [openharmony] - '@rollup/rollup-win32-arm64-msvc@4.59.0': - resolution: {integrity: sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==} + '@rollup/rollup-win32-arm64-msvc@4.60.0': + resolution: {integrity: sha512-hj1wFStD7B1YBeYmvY+lWXZ7ey73YGPcViMShYikqKT1GtstIKQAtfUI6yrzPjAy/O7pO0VLXGmUVWXQMaYgTQ==} cpu: [arm64] os: [win32] - '@rollup/rollup-win32-ia32-msvc@4.59.0': - resolution: {integrity: sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==} + '@rollup/rollup-win32-ia32-msvc@4.60.0': + resolution: {integrity: 
sha512-SyaIPFoxmUPlNDq5EHkTbiKzmSEmq/gOYFI/3HHJ8iS/v1mbugVa7dXUzcJGQfoytp9DJFLhHH4U3/eTy2Bq4w==} cpu: [ia32] os: [win32] - '@rollup/rollup-win32-x64-gnu@4.59.0': - resolution: {integrity: sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==} + '@rollup/rollup-win32-x64-gnu@4.60.0': + resolution: {integrity: sha512-RdcryEfzZr+lAr5kRm2ucN9aVlCCa2QNq4hXelZxb8GG0NJSazq44Z3PCCc8wISRuCVnGs0lQJVX5Vp6fKA+IA==} cpu: [x64] os: [win32] - '@rollup/rollup-win32-x64-msvc@4.59.0': - resolution: {integrity: sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==} + '@rollup/rollup-win32-x64-msvc@4.60.0': + resolution: {integrity: sha512-PrsWNQ8BuE00O3Xsx3ALh2Df8fAj9+cvvX9AIA6o4KpATR98c9mud4XtDWVvsEuyia5U4tVSTKygawyJkjm60w==} cpu: [x64] os: [win32] @@ -2184,13 +2173,13 @@ packages: webpack-hot-middleware: optional: true - '@rspress/core@2.0.5': - resolution: {integrity: sha512-2ezGmANmIrWmhsUrvlRb9Df4xsun1BDgEertDc890aQqtKcNrbu+TBRsOoO+E/N6ioavun7JGGe1wWjvxubCHw==} + '@rspress/core@2.0.6': + resolution: {integrity: sha512-QtVu2V3N3ZTAE6wmiOzDABPBrSLxyMgK8x2LoRaDmu2xc/iklF46Is2oYgEJRJgip0fCuAhihvVwAtmuP61jrg==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true - '@rspress/shared@2.0.5': - resolution: {integrity: sha512-Wdhh+VjU8zJWoVLhv9KJTRAZQ4X2V/Z81Lo2D0hQsa0Kj5F3EaxlMt5/dhX7DoflqNuZPZk/e7CSUB+gO/Umlg==} + '@rspress/shared@2.0.6': + resolution: {integrity: sha512-jCVJP08/LmrU0Xc6tP+B2v0YDadXiayA4mUCgiSlaCPHALDuFVWAq+OSVL8XpWa4QY/3gbAmsnji5x8LBtbciA==} '@shikijs/core@4.0.2': resolution: {integrity: sha512-hxT0YF4ExEqB8G/qFdtJvpmHXBYJ2lWW7qTHDarVkIudPFE6iCIrqdgWxGn5s+ppkGXI0aEGlibI0PAyzP3zlw==} @@ -2273,8 +2262,17 @@ packages: '@types/chai@5.2.3': resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} - '@types/debug@4.1.12': - resolution: {integrity: 
sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + '@types/d3-scale-chromatic@3.1.0': + resolution: {integrity: sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==} + + '@types/d3-scale@4.0.9': + resolution: {integrity: sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==} + + '@types/d3-time@3.0.4': + resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} + + '@types/debug@4.1.13': + resolution: {integrity: sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw==} '@types/deep-eql@4.0.2': resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} @@ -2297,6 +2295,9 @@ packages: '@types/json-schema@7.0.15': resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + '@types/mdast@3.0.15': + resolution: {integrity: sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==} + '@types/mdast@4.0.4': resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==} @@ -2318,54 +2319,57 @@ packages: '@types/react@19.2.14': resolution: {integrity: sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==} + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/unist@2.0.11': resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} '@types/unist@3.0.3': resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} - 
'@typescript-eslint/project-service@8.57.1': - resolution: {integrity: sha512-vx1F37BRO1OftsYlmG9xay1TqnjNVlqALymwWVuYTdo18XuKxtBpCj1QlzNIEHlvlB27osvXFWptYiEWsVdYsg==} + '@typescript-eslint/project-service@8.57.2': + resolution: {integrity: sha512-FuH0wipFywXRTHf+bTTjNyuNQQsQC3qh/dYzaM4I4W0jrCqjCVuUh99+xd9KamUfmCGPvbO8NDngo/vsnNVqgw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/scope-manager@8.57.1': - resolution: {integrity: sha512-hs/QcpCwlwT2L5S+3fT6gp0PabyGk4Q0Rv2doJXA0435/OpnSR3VRgvrp8Xdoc3UAYSg9cyUjTeFXZEPg/3OKg==} + '@typescript-eslint/scope-manager@8.57.2': + resolution: {integrity: sha512-snZKH+W4WbWkrBqj4gUNRIGb/jipDW3qMqVJ4C9rzdFc+wLwruxk+2a5D+uoFcKPAqyqEnSb4l2ULuZf95eSkw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/tsconfig-utils@8.57.1': - resolution: {integrity: sha512-0lgOZB8cl19fHO4eI46YUx2EceQqhgkPSuCGLlGi79L2jwYY1cxeYc1Nae8Aw1xjgW3PKVDLlr3YJ6Bxx8HkWg==} + '@typescript-eslint/tsconfig-utils@8.57.2': + resolution: {integrity: sha512-3Lm5DSM+DCowsUOJC+YqHHnKEfFh5CoGkj5Z31NQSNF4l5wdOwqGn99wmwN/LImhfY3KJnmordBq/4+VDe2eKw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/type-utils@8.57.1': - resolution: {integrity: sha512-+Bwwm0ScukFdyoJsh2u6pp4S9ktegF98pYUU0hkphOOqdMB+1sNQhIz8y5E9+4pOioZijrkfNO/HUJVAFFfPKA==} + '@typescript-eslint/type-utils@8.57.2': + resolution: {integrity: sha512-Co6ZCShm6kIbAM/s+oYVpKFfW7LBc6FXoPXjTRQ449PPNBY8U0KZXuevz5IFuuUj2H9ss40atTaf9dlGLzbWZg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/types@8.57.1': - resolution: {integrity: sha512-S29BOBPJSFUiblEl6RzPPjJt6w25A6XsBqRVDt53tA/tlL8q7ceQNZHTjPeONt/3S7KRI4quk+yP9jK2WjBiPQ==} + '@typescript-eslint/types@8.57.2': + resolution: {integrity: 
sha512-/iZM6FnM4tnx9csuTxspMW4BOSegshwX5oBDznJ7S4WggL7Vczz5d2W11ecc4vRrQMQHXRSxzrCsyG5EsPPTbA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/typescript-estree@8.57.1': - resolution: {integrity: sha512-ybe2hS9G6pXpqGtPli9Gx9quNV0TWLOmh58ADlmZe9DguLq0tiAKVjirSbtM1szG6+QH6rVXyU6GTLQbWnMY+g==} + '@typescript-eslint/typescript-estree@8.57.2': + resolution: {integrity: sha512-2MKM+I6g8tJxfSmFKOnHv2t8Sk3T6rF20A1Puk0svLK+uVapDZB/4pfAeB7nE83uAZrU6OxW+HmOd5wHVdXwXA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/utils@8.57.1': - resolution: {integrity: sha512-XUNSJ/lEVFttPMMoDVA2r2bwrl8/oPx8cURtczkSEswY5T3AeLmCy+EKWQNdL4u0MmAHOjcWrqJp2cdvgjn8dQ==} + '@typescript-eslint/utils@8.57.2': + resolution: {integrity: sha512-krRIbvPK1ju1WBKIefiX+bngPs+odIQUtR7kymzPfo1POVw3jlF+nLkmexdSSd4UCbDcQn+wMBATOOmpBbqgKg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/visitor-keys@8.57.1': - resolution: {integrity: sha512-YWnmJkXbofiz9KbnbbwuA2rpGkFPLbAIetcCNO6mJ8gdhdZ/v7WDXsoGFAJuM6ikUFKTlSQnjWnVO4ux+UzS6A==} + '@typescript-eslint/visitor-keys@8.57.2': + resolution: {integrity: sha512-zhahknjobV2FiD6Ee9iLbS7OV9zi10rG26odsQdfBO/hjSzUQbkIYgda+iNKK1zNiW2ey+Lf8MU5btN17V3dUw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@ungap/structured-clone@1.3.0': @@ -2421,41 +2425,41 @@ packages: '@vitest/utils@4.1.0': resolution: {integrity: sha512-XfPXT6a8TZY3dcGY8EdwsBulFCIw+BeeX0RZn2x/BtiY/75YGh8FeWGG8QISN/WhaqSrE2OrlDgtF8q5uhOTmw==} - '@zpress/cli@0.3.4': - resolution: {integrity: sha512-x/XEZuaHMF3IoOV6AodGRmat9y7B2QSwvU8qFj3aLdKTuL76Cb4rAspJ3h1K9Wr/iob6Xi7c9XxCQKpbaNM3tA==} + '@zpress/cli@0.5.3': + resolution: {integrity: sha512-8xefK2xdoc1bDzeyoX6NRal6d2ssO49Ro+trkU8TKJDLQmIsE7Me14jnI0tlMB7xdt1x3ZPwAC4xT6vm1vw6dA==} engines: {node: '>=24.0.0'} hasBin: true - '@zpress/config@0.3.0': - 
resolution: {integrity: sha512-EDHWiuesaCcgFHJlw7wXsr+NzEDPwaCeEV+5y/8lswiwZPNcxIobbXwPDPN4G6A1Qj8bNHQwcuyO2mgkylbdCA==} + '@zpress/config@0.5.0': + resolution: {integrity: sha512-xjXGbBPJKiO1jlC4DxvVhGM4PVLkWLGEaSwE2h1Xf2D2NwRU6XUN30fYpV5MUN1hfiYZIArTM6GeFO8VBEzOkw==} engines: {node: '>=24.0.0'} - '@zpress/core@0.7.0': - resolution: {integrity: sha512-QwmspM0uXXzFsShI4TUebk9FGXCor+GFCrGubKiLzJuk6RJBAkuBZxILJwkvdNWco6+pykZwkLJ34tiLSwA5sw==} + '@zpress/core@0.8.0': + resolution: {integrity: sha512-Eujll4afNbs/UolhFsb7TxkU/QqT89XKAcDfNUVPs5v6ViaKGjyD7XfOyltRNxq018+jssrEBbV0Ng1wx/Tqjw==} engines: {node: '>=24.0.0'} - '@zpress/kit@0.2.4': - resolution: {integrity: sha512-/rF5fKAW2WZLkPkpZ2lGHXicNqCBdhSfhKBznWRRQWNoHaEQ1eQ30a8iTpAZ8xxGDaA/FG1JkvMFGd+Zx+5U7g==} + '@zpress/kit@0.2.12': + resolution: {integrity: sha512-0Pq0gFTjM43Ty8BXVtfN2oMnYkQUb2+CiBTbqWlDRr5fp50aSpJs9+2ZvBxMGRTXOmkkbXPN8nTg7zjw4QgnAg==} engines: {node: '>=24.0.0'} hasBin: true peerDependencies: - '@rspress/core': ^2.0.5 + '@rspress/core': ^2.0.6 react: ^19.2.4 react-dom: ^19.2.4 - '@zpress/templates@0.1.1': - resolution: {integrity: sha512-pC7HJMxJih7gvqkcpyQhKsXSYHXlhCDBddL1lfBLJQSSzRyjS+qIgP4gYVj1DZQtS/bi6U0pF45wZLbTXXxdEQ==} + '@zpress/templates@0.1.2': + resolution: {integrity: sha512-EUYjuBpGvYPDS6L3t5ncFgAT9Bi6ZfDblQ83YdlXUk7Ri4nCI93cVuJSfzicK6jwzw5HNw5bfG2uTq7za5YFwA==} engines: {node: '>=24.0.0'} - '@zpress/theme@0.3.1': - resolution: {integrity: sha512-RMb9iAEUHgoWC0ODGx4AqXm6RcBH5Vd3fNVsNQmJNZQqtJjQO6cualq3qQrBO3OJYktdZMSjeH9gMMBmZacHOA==} + '@zpress/theme@0.3.2': + resolution: {integrity: sha512-YLQkQtvNdVz0+uojvCFIKQy+E/CXlG1KY0bcAqxIsIJOBjzA+3p1Ee7oge9CUrjtN/sRMSPNqsNGPdkSQ8Hi8w==} engines: {node: '>=24.0.0'} - '@zpress/ui@0.7.0': - resolution: {integrity: sha512-BeyQynzyL0vQUeShb+AT9n6HA/Kp8wrToXUSdZxnrWMjHcS47TjCZ1VfajNe/K61XAwUGuNfC3o5zqoRh8spEA==} + '@zpress/ui@0.8.7': + resolution: {integrity: 
sha512-KqSo+LzuSck2Od8UHEvFbuVb/aHcNDGaAKtMkBAaD1o6JUccpleTvKqvZ+WO7urypKsUCV7Qd2gTAsQhI0CwoQ==} engines: {node: '>=24.0.0'} peerDependencies: - '@rspress/core': ^2.0.5 + '@rspress/core': ^2.0.6 react: ^19.2.4 react-dom: ^19.2.4 @@ -2469,8 +2473,8 @@ packages: engines: {node: '>=0.4.0'} hasBin: true - ai@6.0.116: - resolution: {integrity: sha512-7yM+cTmyRLeNIXwt4Vj+mrrJgVQ9RMIW5WO0ydoLoYkewIvsMcvUmqS4j2RJTUXaF1HphwmSKUMQ/HypNRGOmA==} + ai@6.0.136: + resolution: {integrity: sha512-cStC2i6FzQxHj9yVShcxq14yxP6Mbhp2kAheQTFvbIFd6hsn+izW7WrWW+AUH0Ek8Er8WImNkoYVDQ5Vy+rBjQ==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 @@ -2652,6 +2656,14 @@ packages: resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} engines: {node: '>=14'} + commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + + commander@8.3.0: + resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} + engines: {node: '>= 12'} + comment-parser@1.4.5: resolution: {integrity: sha512-aRDkn3uyIlCFfk5NUA+VdwMmMsh8JGhc4hapfV4yxymHGQ3BVskMQfoXGpCo5IoBuQ9tS5iiVKhCpTcB4pW4qw==} engines: {node: '>= 12.0.0'} @@ -2672,6 +2684,9 @@ packages: copy-to-clipboard@3.3.3: resolution: {integrity: sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==} + cose-base@1.0.3: + resolution: {integrity: sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==} + cross-spawn@7.0.6: resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} @@ -2679,6 +2694,160 @@ packages: csstype@3.2.3: resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + 
cytoscape-cose-bilkent@4.1.0: + resolution: {integrity: sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape@3.33.1: + resolution: {integrity: sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==} + engines: {node: '>=0.10'} + + d3-array@2.12.1: + resolution: {integrity: sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==} + + d3-array@3.2.4: + resolution: {integrity: sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==} + engines: {node: '>=12'} + + d3-axis@3.0.0: + resolution: {integrity: sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==} + engines: {node: '>=12'} + + d3-brush@3.0.0: + resolution: {integrity: sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==} + engines: {node: '>=12'} + + d3-chord@3.0.1: + resolution: {integrity: sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==} + engines: {node: '>=12'} + + d3-color@3.1.0: + resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} + engines: {node: '>=12'} + + d3-contour@4.0.2: + resolution: {integrity: sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==} + engines: {node: '>=12'} + + d3-delaunay@6.0.4: + resolution: {integrity: sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==} + engines: {node: '>=12'} + + d3-dispatch@3.0.1: + resolution: {integrity: sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} + engines: {node: '>=12'} + + d3-drag@3.0.0: + resolution: {integrity: 
sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==} + engines: {node: '>=12'} + + d3-dsv@3.0.1: + resolution: {integrity: sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==} + engines: {node: '>=12'} + hasBin: true + + d3-ease@3.0.1: + resolution: {integrity: sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==} + engines: {node: '>=12'} + + d3-fetch@3.0.1: + resolution: {integrity: sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==} + engines: {node: '>=12'} + + d3-force@3.0.0: + resolution: {integrity: sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==} + engines: {node: '>=12'} + + d3-format@3.1.2: + resolution: {integrity: sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==} + engines: {node: '>=12'} + + d3-geo@3.1.1: + resolution: {integrity: sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==} + engines: {node: '>=12'} + + d3-hierarchy@3.1.2: + resolution: {integrity: sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==} + engines: {node: '>=12'} + + d3-interpolate@3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} + + d3-path@1.0.9: + resolution: {integrity: sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==} + + d3-path@3.1.0: + resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} + engines: {node: '>=12'} + + d3-polygon@3.0.1: + resolution: {integrity: sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==} + engines: {node: '>=12'} + + d3-quadtree@3.0.1: 
+ resolution: {integrity: sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==} + engines: {node: '>=12'} + + d3-random@3.0.1: + resolution: {integrity: sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==} + engines: {node: '>=12'} + + d3-sankey@0.12.3: + resolution: {integrity: sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==} + + d3-scale-chromatic@3.1.0: + resolution: {integrity: sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==} + engines: {node: '>=12'} + + d3-scale@4.0.2: + resolution: {integrity: sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} + engines: {node: '>=12'} + + d3-selection@3.0.0: + resolution: {integrity: sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==} + engines: {node: '>=12'} + + d3-shape@1.3.7: + resolution: {integrity: sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==} + + d3-shape@3.2.0: + resolution: {integrity: sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} + engines: {node: '>=12'} + + d3-time-format@4.1.0: + resolution: {integrity: sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==} + engines: {node: '>=12'} + + d3-time@3.1.0: + resolution: {integrity: sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==} + engines: {node: '>=12'} + + d3-timer@3.0.1: + resolution: {integrity: sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==} + engines: {node: '>=12'} + + d3-transition@3.0.1: + resolution: {integrity: sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==} + engines: {node: '>=12'} + 
peerDependencies: + d3-selection: 2 - 3 + + d3-zoom@3.0.0: + resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} + engines: {node: '>=12'} + + d3@7.9.0: + resolution: {integrity: sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==} + engines: {node: '>=12'} + + dagre-d3-es@7.0.13: + resolution: {integrity: sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==} + + dayjs@1.11.20: + resolution: {integrity: sha512-YbwwqR/uYpeoP4pu043q+LTDLFBLApUP6VxRihdfNTqu4ubqMlGDLd6ErXhEgsyvY0K6nCs7nggYumAN+9uEuQ==} + debug@4.4.3: resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} engines: {node: '>=6.0'} @@ -2704,6 +2873,9 @@ packages: defu@6.1.4: resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} + delaunator@5.1.0: + resolution: {integrity: sha512-AGrQ4QSgssa1NGmWmLPqN5NY2KajF5MqxetNEO+o0n3ZwZZeTmt7bBnvzHWrmkZFxGgr4HdyFgelzgi06otLuQ==} + dequal@2.0.3: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} @@ -2718,10 +2890,17 @@ packages: devlop@1.1.0: resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} + diff@5.2.2: + resolution: {integrity: sha512-vtcDfH3TOjP8UekytvnHH1o1P4FcUdt4eQ1Y+Abap1tk/OB2MWQvcwS2ClCd1zuIhc3JKOx6p3kod8Vfys3E+A==} + engines: {node: '>=0.3.1'} + dir-glob@3.0.1: resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} engines: {node: '>=8'} + dompurify@3.3.3: + resolution: {integrity: sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==} + dotenv@17.3.1: resolution: {integrity: 
sha512-IO8C/dzEb6O3F9/twg6ZLXz164a2fhTnEWb95H23Dm4OuN+92NmEAlTrupP9VW6Jm3sO26tQlqyvyi4CsnY9GA==} engines: {node: '>=12'} @@ -2735,6 +2914,9 @@ packages: oxc-resolver: optional: true + elkjs@0.9.3: + resolution: {integrity: sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ==} + emoji-regex@10.6.0: resolution: {integrity: sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==} @@ -2916,8 +3098,8 @@ packages: fast-xml-builder@1.1.4: resolution: {integrity: sha512-f2jhpN4Eccy0/Uz9csxh3Nu6q4ErKxf0XIsasomfOihuSUa3/xw6w8dnOtCDgEItQFJG8KyXPzQXzcODDrrbOg==} - fast-xml-parser@5.5.6: - resolution: {integrity: sha512-3+fdZyBRVg29n4rXP0joHthhcHdPUHaIC16cuyyd1iLsuaO6Vea36MPrxgAzbZna8lhvZeRL8Bc9GP56/J9xEw==} + fast-xml-parser@5.5.9: + resolution: {integrity: sha512-jldvxr1MC6rtiZKgrFnDSvT8xuH+eJqxqOBThUVjYrxssYTo1avZLGql5l0a0BAERR01CadYzZ83kVEkbyDg+g==} hasBin: true fastq@1.20.1: @@ -2986,8 +3168,12 @@ packages: resolution: {integrity: sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==} engines: {node: '>=18'} - get-tsconfig@4.13.6: - resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==} + get-port@7.2.0: + resolution: {integrity: sha512-afP4W205ONCuMoPBqcR6PSXnzX35KTcJygfJfcp+QY+uwm3p20p1YczWXhlICIzGMCxYBQcySEcOgsJcrkyobg==} + engines: {node: '>=16'} + + get-tsconfig@4.13.7: + resolution: {integrity: sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q==} github-slugger@2.0.0: resolution: {integrity: sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==} @@ -3055,8 +3241,8 @@ packages: hastscript@9.0.1: resolution: {integrity: sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==} - hono@4.12.8: - resolution: {integrity: 
sha512-VJCEvtrezO1IAR+kqEYnxUOoStaQPGrCmX3j4wDTNOcD1uRPFpGlwQUIW8niPuvHXaTUxeOUl5MMDGrl+tmO9A==} + hono@4.12.9: + resolution: {integrity: sha512-wy3T8Zm2bsEvxKZM5w21VdHDDcwVS1yUFFY6i8UobSsKfFceT7TOwhbhfKsDyx7tYQlmRM5FLpIuYvNFyjctiA==} engines: {node: '>=16.9.0'} hookable@6.1.0: @@ -3075,6 +3261,10 @@ packages: resolution: {integrity: sha512-tsYlhAYpjCKa//8rXZ9DqKEawhPoSytweBC2eNvcaDK+57RZLHGqNs3PZTQO6yekLFSuvA6AlnAfrw1uBvtb+Q==} hasBin: true + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + iconv-lite@0.7.2: resolution: {integrity: sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==} engines: {node: '>=0.10.0'} @@ -3097,6 +3287,13 @@ packages: inline-style-parser@0.2.7: resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==} + internmap@1.0.1: + resolution: {integrity: sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==} + + internmap@2.0.3: + resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} + engines: {node: '>=12'} + intl-messageformat@10.7.18: resolution: {integrity: sha512-m3Ofv/X/tV8Y3tHXLohcuVuhWKo7BBq62cqY15etqmLxg2DZ34AGGgQDeR+SCta2+zICb1NX83af0GJmbQ1++g==} @@ -3220,24 +3417,38 @@ packages: jsonfile@6.2.0: resolution: {integrity: sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==} + katex@0.16.40: + resolution: {integrity: sha512-1DJcK/L05k1Y9Gf7wMcyuqFOL6BiY3vY0CFcAM/LPRN04NALxcl6u7lOWNsp3f/bCHWxigzQl6FbR95XJ4R84Q==} + hasBin: true + keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + khroma@2.1.0: + resolution: {integrity: 
sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} + kind-of@6.0.3: resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} engines: {node: '>=0.10.0'} + kleur@4.1.5: + resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} + engines: {node: '>=6'} + laufen@1.2.1: resolution: {integrity: sha512-t3EM1H2hQGx2jbHFUcIro0o2yjxjFIu47RfLAua1ibP8CX3PC3cZSiOpe/schHACKPx1CVPTms3Fk2hsuR6QlQ==} engines: {node: '>=22.0.0'} hasBin: true + layout-base@1.0.2: + resolution: {integrity: sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==} + levn@0.4.1: resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} engines: {node: '>= 0.8.0'} - liquidjs@10.25.0: - resolution: {integrity: sha512-XpO7AiGULTG4xcTlwkcTI5JreFG7b6esLCLp+aUSh7YuQErJZEoUXre9u9rbdb0057pfWG4l0VursvLd5Q/eAw==} + liquidjs@10.25.1: + resolution: {integrity: sha512-D+jsJvkGigFn8qNUgh8U6XNHhGFBp+p8Dk26ea/Hl+XrjFVSg9OXlN31hGAfS3MYQ3Kr8Xi9sEVBVQa/VTVSmg==} engines: {node: '>=16'} hasBin: true @@ -3281,6 +3492,9 @@ packages: mdast-util-find-and-replace@3.0.2: resolution: {integrity: sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==} + mdast-util-from-markdown@1.3.1: + resolution: {integrity: sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==} + mdast-util-from-markdown@2.0.3: resolution: {integrity: sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q==} @@ -3323,6 +3537,9 @@ packages: mdast-util-to-markdown@2.1.2: resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==} + mdast-util-to-string@3.2.0: + resolution: {integrity: 
sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==} + mdast-util-to-string@4.0.0: resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==} @@ -3333,6 +3550,12 @@ packages: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} + mermaid@10.9.5: + resolution: {integrity: sha512-eRlKEjzak4z1rcXeCd1OAlyawhrptClQDo8OuI8n6bSVqJ9oMfd5Lrf3Q+TdJHewi/9AIOc3UmEo8Fz+kNzzuQ==} + + micromark-core-commonmark@1.1.0: + resolution: {integrity: sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==} + micromark-core-commonmark@2.0.3: resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==} @@ -3401,69 +3624,129 @@ packages: micromark-extension-mdxjs@3.0.0: resolution: {integrity: sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==} + micromark-factory-destination@1.1.0: + resolution: {integrity: sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==} + micromark-factory-destination@2.0.1: resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==} + micromark-factory-label@1.1.0: + resolution: {integrity: sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==} + micromark-factory-label@2.0.1: resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==} micromark-factory-mdx-expression@2.0.3: resolution: {integrity: sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==} + micromark-factory-space@1.1.0: + resolution: {integrity: 
sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==} + micromark-factory-space@2.0.1: resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==} + micromark-factory-title@1.1.0: + resolution: {integrity: sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==} + micromark-factory-title@2.0.1: resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==} + micromark-factory-whitespace@1.1.0: + resolution: {integrity: sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==} + micromark-factory-whitespace@2.0.1: resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==} + micromark-util-character@1.2.0: + resolution: {integrity: sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==} + micromark-util-character@2.1.1: resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} + micromark-util-chunked@1.1.0: + resolution: {integrity: sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==} + micromark-util-chunked@2.0.1: resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==} + micromark-util-classify-character@1.1.0: + resolution: {integrity: sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==} + micromark-util-classify-character@2.0.1: resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==} + micromark-util-combine-extensions@1.1.0: + resolution: {integrity: sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==} + 
micromark-util-combine-extensions@2.0.1: resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==} + micromark-util-decode-numeric-character-reference@1.1.0: + resolution: {integrity: sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==} + micromark-util-decode-numeric-character-reference@2.0.2: resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==} + micromark-util-decode-string@1.1.0: + resolution: {integrity: sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==} + micromark-util-decode-string@2.0.1: resolution: {integrity: sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==} + micromark-util-encode@1.1.0: + resolution: {integrity: sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==} + micromark-util-encode@2.0.1: resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} micromark-util-events-to-acorn@2.0.3: resolution: {integrity: sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==} + micromark-util-html-tag-name@1.2.0: + resolution: {integrity: sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==} + micromark-util-html-tag-name@2.0.1: resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==} + micromark-util-normalize-identifier@1.1.0: + resolution: {integrity: sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==} + micromark-util-normalize-identifier@2.0.1: resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==} + micromark-util-resolve-all@1.1.0: 
+ resolution: {integrity: sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==} + micromark-util-resolve-all@2.0.1: resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==} + micromark-util-sanitize-uri@1.2.0: + resolution: {integrity: sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==} + micromark-util-sanitize-uri@2.0.1: resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} + micromark-util-subtokenize@1.1.0: + resolution: {integrity: sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==} + micromark-util-subtokenize@2.1.0: resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==} + micromark-util-symbol@1.1.0: + resolution: {integrity: sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==} + micromark-util-symbol@2.0.1: resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} + micromark-util-types@1.1.0: + resolution: {integrity: sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==} + micromark-util-types@2.0.2: resolution: {integrity: sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==} + micromark@3.2.0: + resolution: {integrity: sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==} + micromark@4.0.2: resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==} @@ -3495,6 +3778,9 @@ packages: engines: {node: '>=10'} hasBin: true + non-layered-tidy-tree-layout@2.0.2: + resolution: {integrity: 
sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw==} + normalize-path@3.0.0: resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} engines: {node: '>=0.10.0'} @@ -3589,8 +3875,8 @@ packages: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} - path-expression-matcher@1.1.3: - resolution: {integrity: sha512-qdVgY8KXmVdJZRSS1JdEPOKPdTiEK/pi0RkcT2sw1RhXxohdujUlJFPuS1TSkevZ9vzd3ZlL7ULl1MHGTApKzQ==} + path-expression-matcher@1.2.0: + resolution: {integrity: sha512-DwmPWeFn+tq7TiyJ2CxezCAirXjFxvaiD03npak3cRjlP9+OjTmSy1EpIrEbh+l6JgUundniloMLDQ/6VTdhLQ==} engines: {node: '>=14.0.0'} path-key@3.1.1: @@ -3692,15 +3978,15 @@ packages: peerDependencies: react: '>=19' - react-router-dom@7.13.1: - resolution: {integrity: sha512-UJnV3Rxc5TgUPJt2KJpo1Jpy0OKQr0AjgbZzBFjaPJcFOb2Y8jA5H3LT8HUJAiRLlWrEXWHbF1Z4SCZaQjWDHw==} + react-router-dom@7.13.2: + resolution: {integrity: sha512-aR7SUORwTqAW0JDeiWF07e9SBE9qGpByR9I8kJT5h/FrBKxPMS6TiC7rmVO+gC0q52Bx7JnjWe8Z1sR9faN4YA==} engines: {node: '>=20.0.0'} peerDependencies: react: '>=18' react-dom: '>=18' - react-router@7.13.1: - resolution: {integrity: sha512-td+xP4X2/6BJvZoX6xw++A2DdEi++YypA69bJUV5oVvqf6/9/9nNlD70YO1e9d3MyamJEBQFEzk6mbfDYbqrSA==} + react-router@7.13.2: + resolution: {integrity: sha512-tX1Aee+ArlKQP+NIUd7SE6Li+CiGKwQtbS+FfRxPX6Pe4vHOo6nr9d++u5cwg+Z8K/x8tP+7qLmujDtfrAoUJA==} engines: {node: '>=20.0.0'} peerDependencies: react: '>=18' @@ -3820,6 +4106,9 @@ packages: resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + robust-predicates@3.0.3: + resolution: {integrity: sha512-NS3levdsRIUOmiJ8FZWCP7LG3QpJyrs/TE0Zpf1yvZu8cAJJ6QMW92H1c7kWpdIHo8RvmLxN/o2JXTKHp74lUA==} + rolldown-plugin-dts@0.22.5: resolution: {integrity: 
sha512-M/HXfM4cboo+jONx9Z0X+CUf3B5tCi7ni+kR5fUW50Fp9AlZk0oVLesibGWgCXDKFp5lpgQ9yhKoImUFjl3VZw==} engines: {node: '>=20.19.0'} @@ -3844,14 +4133,21 @@ packages: engines: {node: ^20.19.0 || >=22.12.0} hasBin: true - rollup@4.59.0: - resolution: {integrity: sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==} + rollup@4.60.0: + resolution: {integrity: sha512-yqjxruMGBQJ2gG4HtjZtAfXArHomazDHoFwFFmZZl0r7Pdo7qCIXKqKHZc8yeoMgzJJ+pO6pEEHa+V7uzWlrAQ==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + rw@1.3.3: + resolution: {integrity: sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==} + + sade@1.8.1: + resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==} + engines: {node: '>=6'} + safe-regex@2.1.1: resolution: {integrity: sha512-rx+x8AMzKb5Q5lQ95Zoi6ZbJqwCLkqi3XuJXp5P3rT8OEc6sZCJG5AE5dU3lsgRr/F4Bs31jSlVN+j5KrsGu9A==} @@ -3964,8 +4260,8 @@ packages: resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} engines: {node: '>=4'} - strnum@2.2.0: - resolution: {integrity: sha512-Y7Bj8XyJxnPAORMZj/xltsfo55uOiyHcU2tnAVzHUnSJR/KsEX+9RoDeXEnsXtl/CX4fAcrt64gZ13aGaWPeBg==} + strnum@2.2.2: + resolution: {integrity: sha512-DnR90I+jtXNSTXWdwrEy9FakW7UX+qUZg28gj5fk2vxxl7uS/3bpI4fjFYVmdK9etptYBPNkpahuQnEwhwECqA==} style-to-js@1.1.21: resolution: {integrity: sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==} @@ -3973,6 +4269,9 @@ packages: style-to-object@1.0.14: resolution: {integrity: sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==} + stylis@4.3.6: + resolution: {integrity: 
sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} + supports-color@5.5.0: resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} engines: {node: '>=4'} @@ -4040,8 +4339,8 @@ packages: trough@2.2.0: resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==} - ts-api-utils@2.4.0: - resolution: {integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==} + ts-api-utils@2.5.0: + resolution: {integrity: sha512-OJ/ibxhPlqrMM0UiNHJ/0CKQkoKF243/AEmplt3qpRgkW8VG7IfOS41h7V8TjITqdByHzrjcS/2si+y4lIh8NA==} engines: {node: '>=18.12'} peerDependencies: typescript: '>=4.8.4' @@ -4051,6 +4350,10 @@ packages: peerDependencies: typescript: '>=4.0.0' + ts-dedent@2.2.0: + resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} + engines: {node: '>=6.10'} + ts-pattern@5.9.0: resolution: {integrity: sha512-6s5V71mX8qBUmlgbrfL33xDUwO0fq48rxAu2LBE11WBeGdpCPOsXksQbZJHvHwhrd3QjUusd3mAOM5Gg0mFBLg==} @@ -4162,6 +4465,9 @@ packages: unist-util-remove@4.0.0: resolution: {integrity: sha512-b4gokeGId57UVRX/eVKej5gXqGlc9+trkORhFJpu9raqZkZhU0zm8Doi05+HaiBsMEIJowL+2WtQ5ItjsngPXg==} + unist-util-stringify-position@3.0.3: + resolution: {integrity: sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==} + unist-util-stringify-position@4.0.0: resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==} @@ -4200,6 +4506,15 @@ packages: peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + uuid@9.0.1: + resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} + hasBin: true + + uvu@0.5.6: + resolution: {integrity: 
sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==} + engines: {node: '>=8'} + hasBin: true + vfile-location@5.0.3: resolution: {integrity: sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==} @@ -4287,6 +4602,9 @@ packages: web-namespaces@2.0.1: resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==} + web-worker@1.5.0: + resolution: {integrity: sha512-RiMReJrTAiA+mBjGONMnjVDP2u3p9R1vkcGz6gDIrOMT3oGuYwX2WRMYI9ipkphSuE5XKEhydbhNEJh4NY9mlw==} + which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} @@ -4309,8 +4627,8 @@ packages: resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} engines: {node: '>=10'} - yaml@2.8.2: - resolution: {integrity: sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==} + yaml@2.8.3: + resolution: {integrity: sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg==} engines: {node: '>= 14.6'} hasBin: true @@ -4337,23 +4655,23 @@ snapshots: '@ai-sdk/devtools@0.0.15': dependencies: '@ai-sdk/provider': 3.0.8 - '@hono/node-server': 1.19.11(hono@4.12.8) - hono: 4.12.8 + '@hono/node-server': 1.19.11(hono@4.12.9) + hono: 4.12.9 - '@ai-sdk/gateway@3.0.66(zod@4.3.6)': + '@ai-sdk/gateway@3.0.78(zod@4.3.6)': dependencies: '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.19(zod@4.3.6) + '@ai-sdk/provider-utils': 4.0.21(zod@4.3.6) '@vercel/oidc': 3.1.0 zod: 4.3.6 - '@ai-sdk/openai@3.0.41(zod@4.3.6)': + '@ai-sdk/openai@3.0.48(zod@4.3.6)': dependencies: '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.19(zod@4.3.6) + '@ai-sdk/provider-utils': 4.0.21(zod@4.3.6) zod: 4.3.6 - '@ai-sdk/provider-utils@4.0.19(zod@4.3.6)': + 
'@ai-sdk/provider-utils@4.0.21(zod@4.3.6)': dependencies: '@ai-sdk/provider': 3.0.8 '@standard-schema/spec': 1.1.0 @@ -4422,6 +4740,8 @@ snapshots: '@bcoe/v8-coverage@1.0.2': {} + '@braintree/sanitize-url@6.0.4': {} + '@changesets/apply-release-plan@7.1.0': dependencies: '@changesets/config': 3.1.3 @@ -4621,13 +4941,13 @@ snapshots: '@clerc/utils@1.3.1': {} - '@emnapi/core@1.9.0': + '@emnapi/core@1.9.1': dependencies: '@emnapi/wasi-threads': 1.2.0 tslib: 2.8.1 optional: true - '@emnapi/runtime@1.9.0': + '@emnapi/runtime@1.9.1': dependencies: tslib: 2.8.1 optional: true @@ -4640,7 +4960,7 @@ snapshots: '@es-joy/jsdoccomment@0.84.0': dependencies: '@types/estree': 1.0.8 - '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/types': 8.57.2 comment-parser: 1.4.5 esquery: 1.7.0 jsdoc-type-pratt-parser: 7.1.1 @@ -4781,9 +5101,9 @@ snapshots: dependencies: tslib: 2.8.1 - '@hono/node-server@1.19.11(hono@4.12.8)': + '@hono/node-server@1.19.11(hono@4.12.9)': dependencies: - hono: 4.12.8 + hono: 4.12.9 '@humanfs/core@0.19.1': {} @@ -4820,7 +5140,7 @@ snapshots: dependencies: '@iconify/types': 2.0.0 - '@iconify-json/simple-icons@1.2.74': + '@iconify-json/simple-icons@1.2.75': dependencies: '@iconify/types': 2.0.0 @@ -4904,16 +5224,16 @@ snapshots: - unplugin-unused - vue-tsc - '@kidd-cli/cli@0.4.9(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(typescript@5.9.3)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)))': + '@kidd-cli/cli@0.4.9(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(typescript@5.9.3)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)))': dependencies: '@kidd-cli/bundler': 0.2.5(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(typescript@5.9.3) '@kidd-cli/config': 0.1.6(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) - '@kidd-cli/core': 
0.10.0(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) + '@kidd-cli/core': 0.10.0(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) '@kidd-cli/utils': 0.1.5 fs-extra: 11.3.4 - liquidjs: 10.25.0 + liquidjs: 10.25.1 picocolors: 1.1.1 - yaml: 2.8.2 + yaml: 2.8.3 zod: 4.3.6 transitivePeerDependencies: - '@arethetypeswrong/core' @@ -4949,31 +5269,7 @@ snapshots: - jiti - magicast - '@kidd-cli/core@0.10.0(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)))': - dependencies: - '@clack/prompts': 1.1.0 - '@kidd-cli/config': 0.1.6(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) - '@kidd-cli/utils': 0.1.5 - '@pinojs/redact': 0.4.0 - c12: 4.0.0-beta.4(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) - dotenv: 17.3.1 - es-toolkit: 1.45.1 - jsonc-parser: 3.3.1 - liquidjs: 10.25.0 - picocolors: 1.1.1 - ts-pattern: 5.9.0 - yaml: 2.8.2 - yargs: 18.0.0 - zod: 4.3.6 - optionalDependencies: - jiti: 2.6.1 - vitest: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) - transitivePeerDependencies: - - chokidar - - giget - - magicast - - '@kidd-cli/core@0.7.1(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)))': + '@kidd-cli/core@0.10.0(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)))': dependencies: '@clack/prompts': 1.1.0 '@kidd-cli/config': 
0.1.6(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) @@ -4983,15 +5279,15 @@ snapshots: dotenv: 17.3.1 es-toolkit: 1.45.1 jsonc-parser: 3.3.1 - liquidjs: 10.25.0 + liquidjs: 10.25.1 picocolors: 1.1.1 ts-pattern: 5.9.0 - yaml: 2.8.2 + yaml: 2.8.3 yargs: 18.0.0 zod: 4.3.6 optionalDependencies: jiti: 2.6.1 - vitest: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + vitest: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) transitivePeerDependencies: - chokidar - giget @@ -5067,15 +5363,15 @@ snapshots: '@napi-rs/wasm-runtime@1.0.7': dependencies: - '@emnapi/core': 1.9.0 - '@emnapi/runtime': 1.9.0 + '@emnapi/core': 1.9.1 + '@emnapi/runtime': 1.9.1 '@tybys/wasm-util': 0.10.1 optional: true '@napi-rs/wasm-runtime@1.1.1': dependencies: - '@emnapi/core': 1.9.0 - '@emnapi/runtime': 1.9.0 + '@emnapi/core': 1.9.1 + '@emnapi/runtime': 1.9.1 '@tybys/wasm-util': 0.10.1 optional: true @@ -6309,79 +6605,79 @@ snapshots: '@rolldown/pluginutils@1.0.0-rc.9': {} - '@rollup/rollup-android-arm-eabi@4.59.0': + '@rollup/rollup-android-arm-eabi@4.60.0': optional: true - '@rollup/rollup-android-arm64@4.59.0': + '@rollup/rollup-android-arm64@4.60.0': optional: true - '@rollup/rollup-darwin-arm64@4.59.0': + '@rollup/rollup-darwin-arm64@4.60.0': optional: true - '@rollup/rollup-darwin-x64@4.59.0': + '@rollup/rollup-darwin-x64@4.60.0': optional: true - '@rollup/rollup-freebsd-arm64@4.59.0': + '@rollup/rollup-freebsd-arm64@4.60.0': optional: true - '@rollup/rollup-freebsd-x64@4.59.0': + '@rollup/rollup-freebsd-x64@4.60.0': optional: true - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': + '@rollup/rollup-linux-arm-gnueabihf@4.60.0': optional: true - '@rollup/rollup-linux-arm-musleabihf@4.59.0': + '@rollup/rollup-linux-arm-musleabihf@4.60.0': optional: true - '@rollup/rollup-linux-arm64-gnu@4.59.0': + '@rollup/rollup-linux-arm64-gnu@4.60.0': 
optional: true - '@rollup/rollup-linux-arm64-musl@4.59.0': + '@rollup/rollup-linux-arm64-musl@4.60.0': optional: true - '@rollup/rollup-linux-loong64-gnu@4.59.0': + '@rollup/rollup-linux-loong64-gnu@4.60.0': optional: true - '@rollup/rollup-linux-loong64-musl@4.59.0': + '@rollup/rollup-linux-loong64-musl@4.60.0': optional: true - '@rollup/rollup-linux-ppc64-gnu@4.59.0': + '@rollup/rollup-linux-ppc64-gnu@4.60.0': optional: true - '@rollup/rollup-linux-ppc64-musl@4.59.0': + '@rollup/rollup-linux-ppc64-musl@4.60.0': optional: true - '@rollup/rollup-linux-riscv64-gnu@4.59.0': + '@rollup/rollup-linux-riscv64-gnu@4.60.0': optional: true - '@rollup/rollup-linux-riscv64-musl@4.59.0': + '@rollup/rollup-linux-riscv64-musl@4.60.0': optional: true - '@rollup/rollup-linux-s390x-gnu@4.59.0': + '@rollup/rollup-linux-s390x-gnu@4.60.0': optional: true - '@rollup/rollup-linux-x64-gnu@4.59.0': + '@rollup/rollup-linux-x64-gnu@4.60.0': optional: true - '@rollup/rollup-linux-x64-musl@4.59.0': + '@rollup/rollup-linux-x64-musl@4.60.0': optional: true - '@rollup/rollup-openbsd-x64@4.59.0': + '@rollup/rollup-openbsd-x64@4.60.0': optional: true - '@rollup/rollup-openharmony-arm64@4.59.0': + '@rollup/rollup-openharmony-arm64@4.60.0': optional: true - '@rollup/rollup-win32-arm64-msvc@4.59.0': + '@rollup/rollup-win32-arm64-msvc@4.60.0': optional: true - '@rollup/rollup-win32-ia32-msvc@4.59.0': + '@rollup/rollup-win32-ia32-msvc@4.60.0': optional: true - '@rollup/rollup-win32-x64-gnu@4.59.0': + '@rollup/rollup-win32-x64-gnu@4.60.0': optional: true - '@rollup/rollup-win32-x64-msvc@4.59.0': + '@rollup/rollup-win32-x64-msvc@4.60.0': optional: true '@rsbuild/core@2.0.0-beta.6': @@ -6457,13 +6753,13 @@ snapshots: html-entities: 2.6.0 react-refresh: 0.18.0 - '@rspress/core@2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2)': + '@rspress/core@2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2)': dependencies: 
'@mdx-js/mdx': 3.1.1 '@mdx-js/react': 3.1.1(@types/react@19.2.14)(react@19.2.4) '@rsbuild/core': 2.0.0-beta.6 '@rsbuild/plugin-react': 1.4.6(@rsbuild/core@2.0.0-beta.6) - '@rspress/shared': 2.0.5 + '@rspress/shared': 2.0.6 '@shikijs/rehype': 4.0.2 '@types/unist': 3.0.3 '@unhead/react': 2.1.12(react@19.2.4) @@ -6487,7 +6783,7 @@ snapshots: react-lazy-with-preload: 2.2.1 react-reconciler: 0.33.0(react@19.2.4) react-render-to-markdown: 19.0.1(react@19.2.4) - react-router-dom: 7.13.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + react-router-dom: 7.13.2(react-dom@19.2.4(react@19.2.4))(react@19.2.4) rehype-external-links: 3.0.0 rehype-raw: 7.0.0 remark-cjk-friendly: 2.0.1(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5) @@ -6514,7 +6810,7 @@ snapshots: - supports-color - webpack-hot-middleware - '@rspress/shared@2.0.5': + '@rspress/shared@2.0.6': dependencies: '@rsbuild/core': 2.0.0-beta.6 '@shikijs/rehype': 4.0.2 @@ -6610,7 +6906,15 @@ snapshots: '@types/deep-eql': 4.0.2 assertion-error: 2.0.1 - '@types/debug@4.1.12': + '@types/d3-scale-chromatic@3.1.0': {} + + '@types/d3-scale@4.0.9': + dependencies: + '@types/d3-time': 3.0.4 + + '@types/d3-time@3.0.4': {} + + '@types/debug@4.1.13': dependencies: '@types/ms': 2.1.0 @@ -6632,6 +6936,10 @@ snapshots: '@types/json-schema@7.0.15': {} + '@types/mdast@3.0.15': + dependencies: + '@types/unist': 2.0.11 + '@types/mdast@4.0.4': dependencies: '@types/unist': 3.0.3 @@ -6652,71 +6960,74 @@ snapshots: dependencies: csstype: 3.2.3 + '@types/trusted-types@2.0.7': + optional: true + '@types/unist@2.0.11': {} '@types/unist@3.0.3': {} - '@typescript-eslint/project-service@8.57.1(typescript@5.9.3)': + '@typescript-eslint/project-service@8.57.2(typescript@5.9.3)': dependencies: - '@typescript-eslint/tsconfig-utils': 8.57.1(typescript@5.9.3) - '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/tsconfig-utils': 8.57.2(typescript@5.9.3) + '@typescript-eslint/types': 8.57.2 debug: 
4.4.3(supports-color@5.5.0) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/scope-manager@8.57.1': + '@typescript-eslint/scope-manager@8.57.2': dependencies: - '@typescript-eslint/types': 8.57.1 - '@typescript-eslint/visitor-keys': 8.57.1 + '@typescript-eslint/types': 8.57.2 + '@typescript-eslint/visitor-keys': 8.57.2 - '@typescript-eslint/tsconfig-utils@8.57.1(typescript@5.9.3)': + '@typescript-eslint/tsconfig-utils@8.57.2(typescript@5.9.3)': dependencies: typescript: 5.9.3 - '@typescript-eslint/type-utils@8.57.1(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/type-utils@8.57.2(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@typescript-eslint/types': 8.57.1 - '@typescript-eslint/typescript-estree': 8.57.1(typescript@5.9.3) - '@typescript-eslint/utils': 8.57.1(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/types': 8.57.2 + '@typescript-eslint/typescript-estree': 8.57.2(typescript@5.9.3) + '@typescript-eslint/utils': 8.57.2(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) debug: 4.4.3(supports-color@5.5.0) eslint: 10.0.3(jiti@2.6.1) - ts-api-utils: 2.4.0(typescript@5.9.3) + ts-api-utils: 2.5.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/types@8.57.1': {} + '@typescript-eslint/types@8.57.2': {} - '@typescript-eslint/typescript-estree@8.57.1(typescript@5.9.3)': + '@typescript-eslint/typescript-estree@8.57.2(typescript@5.9.3)': dependencies: - '@typescript-eslint/project-service': 8.57.1(typescript@5.9.3) - '@typescript-eslint/tsconfig-utils': 8.57.1(typescript@5.9.3) - '@typescript-eslint/types': 8.57.1 - '@typescript-eslint/visitor-keys': 8.57.1 + '@typescript-eslint/project-service': 8.57.2(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.57.2(typescript@5.9.3) + '@typescript-eslint/types': 8.57.2 + '@typescript-eslint/visitor-keys': 8.57.2 debug: 4.4.3(supports-color@5.5.0) minimatch: 10.2.4 semver: 7.7.4 
tinyglobby: 0.2.15 - ts-api-utils: 2.4.0(typescript@5.9.3) + ts-api-utils: 2.5.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.57.1(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/utils@8.57.2(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 4.9.1(eslint@10.0.3(jiti@2.6.1)) - '@typescript-eslint/scope-manager': 8.57.1 - '@typescript-eslint/types': 8.57.1 - '@typescript-eslint/typescript-estree': 8.57.1(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.57.2 + '@typescript-eslint/types': 8.57.2 + '@typescript-eslint/typescript-estree': 8.57.2(typescript@5.9.3) eslint: 10.0.3(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/visitor-keys@8.57.1': + '@typescript-eslint/visitor-keys@8.57.2': dependencies: - '@typescript-eslint/types': 8.57.1 + '@typescript-eslint/types': 8.57.2 eslint-visitor-keys: 5.0.1 '@ungap/structured-clone@1.3.0': {} @@ -6730,7 +7041,7 @@ snapshots: '@vercel/oidc@3.1.0': {} - '@vitest/coverage-v8@4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)))': + '@vitest/coverage-v8@4.1.0(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.1.0 @@ -6742,7 +7053,7 @@ snapshots: obug: 2.1.1 std-env: 4.0.0 tinyrainbow: 3.1.0 - vitest: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + vitest: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) '@vitest/expect@4.1.0': dependencies: @@ -6753,13 +7064,13 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.1.0 - 
'@vitest/mocker@4.1.0(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.1.0(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))': dependencies: '@vitest/spy': 4.1.0 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: - vite: 7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3) '@vitest/pretty-format@4.1.0': dependencies: @@ -6785,14 +7096,15 @@ snapshots: convert-source-map: 2.0.0 tinyrainbow: 3.1.0 - '@zpress/cli@0.3.4(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)))': + '@zpress/cli@0.5.3(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)))': dependencies: - '@kidd-cli/core': 0.7.1(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) - '@rspress/core': 2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2) - '@zpress/core': 0.7.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3) - '@zpress/templates': 0.1.1 - '@zpress/ui': 0.7.0(@rspress/core@2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + 
'@kidd-cli/core': 0.10.0(chokidar@5.0.0)(jiti@2.6.1)(magicast@0.5.2)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) + '@rspress/core': 2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2) + '@zpress/core': 0.8.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3) + '@zpress/templates': 0.1.2 + '@zpress/ui': 0.8.7(@rspress/core@2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) es-toolkit: 1.45.1 + get-port: 7.2.0 ts-pattern: 5.9.0 zod: 4.3.6 transitivePeerDependencies: @@ -6815,9 +7127,9 @@ snapshots: - vitest - webpack-hot-middleware - '@zpress/config@0.3.0(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)': + '@zpress/config@0.5.0(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)': dependencies: - '@zpress/theme': 0.3.1 + '@zpress/theme': 0.3.2 c12: 4.0.0-beta.4(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) es-toolkit: 1.45.1 ts-pattern: 5.9.0 @@ -6830,17 +7142,18 @@ snapshots: - jiti - magicast - '@zpress/core@0.7.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3)': + '@zpress/core@0.8.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3)': dependencies: '@apidevtools/swagger-parser': 12.1.0(openapi-types@12.1.3) '@clack/prompts': 1.1.0 - '@zpress/config': 0.3.0(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) - '@zpress/theme': 0.3.1 + '@zpress/config': 0.5.0(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) + '@zpress/theme': 0.3.2 es-toolkit: 1.45.1 fast-glob: 3.3.3 gray-matter: 4.0.3 jiti: 2.6.1 js-yaml: 4.1.1 + liquidjs: 10.25.1 ts-pattern: 5.9.0 transitivePeerDependencies: - chokidar @@ -6849,12 +7162,12 @@ snapshots: - magicast - openapi-types - 
'@zpress/kit@0.2.4(@rspress/core@2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)))': + '@zpress/kit@0.2.12(@rspress/core@2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)))': dependencies: - '@rspress/core': 2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2) - '@zpress/cli': 0.3.4(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))) - '@zpress/core': 0.7.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3) - '@zpress/ui': 0.7.0(@rspress/core@2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + '@rspress/core': 2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2) + '@zpress/cli': 
0.5.3(@types/mdast@4.0.4)(@types/react@19.2.14)(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(micromark-util-types@2.0.2)(micromark@4.0.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))) + '@zpress/core': 0.8.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3) + '@zpress/ui': 0.8.7(@rspress/core@2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) react: 19.2.4 react-dom: 19.2.4(react@19.2.4) transitivePeerDependencies: @@ -6875,17 +7188,17 @@ snapshots: - vitest - webpack-hot-middleware - '@zpress/templates@0.1.1': + '@zpress/templates@0.1.2': dependencies: es-toolkit: 1.45.1 - '@zpress/theme@0.3.1': + '@zpress/theme@0.3.2': dependencies: ts-pattern: 5.9.0 type-fest: 5.5.0 zod: 4.3.6 - '@zpress/ui@0.7.0(@rspress/core@2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@zpress/ui@0.8.7(@rspress/core@2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2))(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2)(openapi-types@12.1.3)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: '@iconify-json/catppuccin': 1.2.17 '@iconify-json/devicon': 1.2.61 @@ -6893,19 +7206,22 @@ snapshots: '@iconify-json/material-icon-theme': 1.2.56 '@iconify-json/mdi': 1.2.3 '@iconify-json/pixelarticons': 1.2.4 - '@iconify-json/simple-icons': 1.2.74 + '@iconify-json/simple-icons': 1.2.75 '@iconify-json/skill-icons': 1.2.4 '@iconify-json/vscode-icons': 1.2.45 '@iconify/react': 6.0.2(react@19.2.4) - '@rspress/core': 
2.0.5(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2) - '@zpress/config': 0.3.0(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) - '@zpress/core': 0.7.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3) - '@zpress/theme': 0.3.1 + '@rspress/core': 2.0.6(@types/mdast@4.0.4)(@types/react@19.2.14)(micromark-util-types@2.0.2)(micromark@4.0.2) + '@zpress/config': 0.5.0(chokidar@5.0.0)(dotenv@17.3.1)(jiti@2.6.1)(magicast@0.5.2) + '@zpress/core': 0.8.0(chokidar@5.0.0)(dotenv@17.3.1)(magicast@0.5.2)(openapi-types@12.1.3) + '@zpress/theme': 0.3.2 + katex: 0.16.40 + mermaid: 10.9.5 openapi-sampler: 1.7.2 react: 19.2.4 react-aria-components: 1.16.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) react-dom: 19.2.4(react@19.2.4) ts-pattern: 5.9.0 + unist-util-visit: 5.1.0 transitivePeerDependencies: - chokidar - dotenv @@ -6913,6 +7229,7 @@ snapshots: - jiti - magicast - openapi-types + - supports-color acorn-jsx@5.3.2(acorn@8.16.0): dependencies: @@ -6920,11 +7237,11 @@ snapshots: acorn@8.16.0: {} - ai@6.0.116(zod@4.3.6): + ai@6.0.136(zod@4.3.6): dependencies: - '@ai-sdk/gateway': 3.0.66(zod@4.3.6) + '@ai-sdk/gateway': 3.0.78(zod@4.3.6) '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.19(zod@4.3.6) + '@ai-sdk/provider-utils': 4.0.21(zod@4.3.6) '@opentelemetry/api': 1.9.0 zod: 4.3.6 @@ -7084,6 +7401,10 @@ snapshots: commander@10.0.1: {} + commander@7.2.0: {} + + commander@8.3.0: {} + comment-parser@1.4.5: {} compute-scroll-into-view@3.1.1: {} @@ -7098,6 +7419,10 @@ snapshots: dependencies: toggle-selection: 1.0.6 + cose-base@1.0.3: + dependencies: + layout-base: 1.0.2 + cross-spawn@7.0.6: dependencies: path-key: 3.1.1 @@ -7106,6 +7431,187 @@ snapshots: csstype@3.2.3: {} + cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): + dependencies: + cose-base: 1.0.3 + cytoscape: 3.33.1 + + cytoscape@3.33.1: {} + + d3-array@2.12.1: + dependencies: + internmap: 1.0.1 + + d3-array@3.2.4: + dependencies: + internmap: 
2.0.3 + + d3-axis@3.0.0: {} + + d3-brush@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3-chord@3.0.1: + dependencies: + d3-path: 3.1.0 + + d3-color@3.1.0: {} + + d3-contour@4.0.2: + dependencies: + d3-array: 3.2.4 + + d3-delaunay@6.0.4: + dependencies: + delaunator: 5.1.0 + + d3-dispatch@3.0.1: {} + + d3-drag@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-selection: 3.0.0 + + d3-dsv@3.0.1: + dependencies: + commander: 7.2.0 + iconv-lite: 0.6.3 + rw: 1.3.3 + + d3-ease@3.0.1: {} + + d3-fetch@3.0.1: + dependencies: + d3-dsv: 3.0.1 + + d3-force@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-quadtree: 3.0.1 + d3-timer: 3.0.1 + + d3-format@3.1.2: {} + + d3-geo@3.1.1: + dependencies: + d3-array: 3.2.4 + + d3-hierarchy@3.1.2: {} + + d3-interpolate@3.0.1: + dependencies: + d3-color: 3.1.0 + + d3-path@1.0.9: {} + + d3-path@3.1.0: {} + + d3-polygon@3.0.1: {} + + d3-quadtree@3.0.1: {} + + d3-random@3.0.1: {} + + d3-sankey@0.12.3: + dependencies: + d3-array: 2.12.1 + d3-shape: 1.3.7 + + d3-scale-chromatic@3.1.0: + dependencies: + d3-color: 3.1.0 + d3-interpolate: 3.0.1 + + d3-scale@4.0.2: + dependencies: + d3-array: 3.2.4 + d3-format: 3.1.2 + d3-interpolate: 3.0.1 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + + d3-selection@3.0.0: {} + + d3-shape@1.3.7: + dependencies: + d3-path: 1.0.9 + + d3-shape@3.2.0: + dependencies: + d3-path: 3.1.0 + + d3-time-format@4.1.0: + dependencies: + d3-time: 3.1.0 + + d3-time@3.1.0: + dependencies: + d3-array: 3.2.4 + + d3-timer@3.0.1: {} + + d3-transition@3.0.1(d3-selection@3.0.0): + dependencies: + d3-color: 3.1.0 + d3-dispatch: 3.0.1 + d3-ease: 3.0.1 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-timer: 3.0.1 + + d3-zoom@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3@7.9.0: + dependencies: + d3-array: 3.2.4 + d3-axis: 3.0.0 
+ d3-brush: 3.0.0 + d3-chord: 3.0.1 + d3-color: 3.1.0 + d3-contour: 4.0.2 + d3-delaunay: 6.0.4 + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-dsv: 3.0.1 + d3-ease: 3.0.1 + d3-fetch: 3.0.1 + d3-force: 3.0.0 + d3-format: 3.1.2 + d3-geo: 3.1.1 + d3-hierarchy: 3.1.2 + d3-interpolate: 3.0.1 + d3-path: 3.1.0 + d3-polygon: 3.0.1 + d3-quadtree: 3.0.1 + d3-random: 3.0.1 + d3-scale: 4.0.2 + d3-scale-chromatic: 3.1.0 + d3-selection: 3.0.0 + d3-shape: 3.2.0 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + d3-timer: 3.0.1 + d3-transition: 3.0.1(d3-selection@3.0.0) + d3-zoom: 3.0.0 + + dagre-d3-es@7.0.13: + dependencies: + d3: 7.9.0 + lodash-es: 4.17.23 + + dayjs@1.11.20: {} + debug@4.4.3(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -7124,6 +7630,10 @@ snapshots: defu@6.1.4: {} + delaunator@5.1.0: + dependencies: + robust-predicates: 3.0.3 + dequal@2.0.3: {} destr@2.0.5: {} @@ -7134,14 +7644,22 @@ snapshots: dependencies: dequal: 2.0.3 + diff@5.2.2: {} + dir-glob@3.0.1: dependencies: path-type: 4.0.0 + dompurify@3.3.3: + optionalDependencies: + '@types/trusted-types': 2.0.7 + dotenv@17.3.1: {} dts-resolver@2.1.3: {} + elkjs@0.9.3: {} + emoji-regex@10.6.0: {} empathic@2.0.0: {} @@ -7212,12 +7730,12 @@ snapshots: eslint-plugin-functional@9.0.4(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@typescript-eslint/utils': 8.57.1(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.57.2(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) deepmerge-ts: 7.1.5 escape-string-regexp: 5.0.0 eslint: 10.0.3(jiti@2.6.1) is-immutable-type: 5.0.1(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) - ts-api-utils: 2.4.0(typescript@5.9.3) + ts-api-utils: 2.5.0(typescript@5.9.3) ts-declaration-location: 1.0.7(typescript@5.9.3) optionalDependencies: typescript: 5.9.3 @@ -7387,13 +7905,13 @@ snapshots: fast-xml-builder@1.1.4: dependencies: - path-expression-matcher: 1.1.3 + path-expression-matcher: 1.2.0 - fast-xml-parser@5.5.6: + fast-xml-parser@5.5.9: dependencies: 
fast-xml-builder: 1.1.4 - path-expression-matcher: 1.1.3 - strnum: 2.2.0 + path-expression-matcher: 1.2.0 + strnum: 2.2.2 fastq@1.20.1: dependencies: @@ -7457,7 +7975,9 @@ snapshots: get-east-asian-width@1.5.0: {} - get-tsconfig@4.13.6: + get-port@7.2.0: {} + + get-tsconfig@4.13.7: dependencies: resolve-pkg-maps: 1.0.0 @@ -7613,7 +8133,7 @@ snapshots: property-information: 7.1.0 space-separated-tokens: 2.0.2 - hono@4.12.8: {} + hono@4.12.9: {} hookable@6.1.0: {} @@ -7625,6 +8145,10 @@ snapshots: human-id@4.1.3: {} + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + iconv-lite@0.7.2: dependencies: safer-buffer: 2.1.2 @@ -7639,6 +8163,10 @@ snapshots: inline-style-parser@0.2.7: {} + internmap@1.0.1: {} + + internmap@2.0.3: {} + intl-messageformat@10.7.18: dependencies: '@formatjs/ecma402-abstract': 2.3.6 @@ -7673,9 +8201,9 @@ snapshots: is-immutable-type@5.0.1(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@typescript-eslint/type-utils': 8.57.1(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/type-utils': 8.57.2(eslint@10.0.3(jiti@2.6.1))(typescript@5.9.3) eslint: 10.0.3(jiti@2.6.1) - ts-api-utils: 2.4.0(typescript@5.9.3) + ts-api-utils: 2.5.0(typescript@5.9.3) ts-declaration-location: 1.0.7(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: @@ -7749,12 +8277,20 @@ snapshots: optionalDependencies: graceful-fs: 4.2.11 + katex@0.16.40: + dependencies: + commander: 8.3.0 + keyv@4.5.4: dependencies: json-buffer: 3.0.1 + khroma@2.1.0: {} + kind-of@6.0.3: {} + kleur@4.1.5: {} + laufen@1.2.1(magicast@0.5.2): dependencies: '@clack/prompts': 1.1.0 @@ -7768,18 +8304,20 @@ snapshots: jiti: 2.6.1 picocolors: 1.1.1 picomatch: 4.0.3 - yaml: 2.8.2 + yaml: 2.8.3 zod: 4.3.6 transitivePeerDependencies: - giget - magicast + layout-base@1.0.2: {} + levn@0.4.1: dependencies: prelude-ls: 1.2.1 type-check: 0.4.0 - liquidjs@10.25.0: + liquidjs@10.25.1: dependencies: commander: 10.0.1 @@ -7824,6 +8362,23 @@ snapshots: unist-util-is: 
6.0.1 unist-util-visit-parents: 6.0.2 + mdast-util-from-markdown@1.3.1: + dependencies: + '@types/mdast': 3.0.15 + '@types/unist': 2.0.11 + decode-named-character-reference: 1.3.0 + mdast-util-to-string: 3.2.0 + micromark: 3.2.0 + micromark-util-decode-numeric-character-reference: 1.1.0 + micromark-util-decode-string: 1.1.0 + micromark-util-normalize-identifier: 1.1.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + unist-util-stringify-position: 3.0.3 + uvu: 0.5.6 + transitivePeerDependencies: + - supports-color + mdast-util-from-markdown@2.0.3: dependencies: '@types/mdast': 4.0.4 @@ -7976,6 +8531,10 @@ snapshots: unist-util-visit: 5.1.0 zwitch: 2.0.4 + mdast-util-to-string@3.2.0: + dependencies: + '@types/mdast': 3.0.15 + mdast-util-to-string@4.0.0: dependencies: '@types/mdast': 4.0.4 @@ -7984,6 +8543,50 @@ snapshots: merge2@1.4.1: {} + mermaid@10.9.5: + dependencies: + '@braintree/sanitize-url': 6.0.4 + '@types/d3-scale': 4.0.9 + '@types/d3-scale-chromatic': 3.1.0 + cytoscape: 3.33.1 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.33.1) + d3: 7.9.0 + d3-sankey: 0.12.3 + dagre-d3-es: 7.0.13 + dayjs: 1.11.20 + dompurify: 3.3.3 + elkjs: 0.9.3 + katex: 0.16.40 + khroma: 2.1.0 + lodash-es: 4.17.23 + mdast-util-from-markdown: 1.3.1 + non-layered-tidy-tree-layout: 2.0.2 + stylis: 4.3.6 + ts-dedent: 2.2.0 + uuid: 9.0.1 + web-worker: 1.5.0 + transitivePeerDependencies: + - supports-color + + micromark-core-commonmark@1.1.0: + dependencies: + decode-named-character-reference: 1.3.0 + micromark-factory-destination: 1.1.0 + micromark-factory-label: 1.1.0 + micromark-factory-space: 1.1.0 + micromark-factory-title: 1.1.0 + micromark-factory-whitespace: 1.1.0 + micromark-util-character: 1.2.0 + micromark-util-chunked: 1.1.0 + micromark-util-classify-character: 1.1.0 + micromark-util-html-tag-name: 1.2.0 + micromark-util-normalize-identifier: 1.1.0 + micromark-util-resolve-all: 1.1.0 + micromark-util-subtokenize: 1.1.0 + micromark-util-symbol: 1.1.0 + 
micromark-util-types: 1.1.0 + uvu: 0.5.6 + micromark-core-commonmark@2.0.3: dependencies: decode-named-character-reference: 1.3.0 @@ -8144,12 +8747,25 @@ snapshots: micromark-util-combine-extensions: 2.0.1 micromark-util-types: 2.0.2 + micromark-factory-destination@1.1.0: + dependencies: + micromark-util-character: 1.2.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + micromark-factory-destination@2.0.1: dependencies: micromark-util-character: 2.1.1 micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + micromark-factory-label@1.1.0: + dependencies: + micromark-util-character: 1.2.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + uvu: 0.5.6 + micromark-factory-label@2.0.1: dependencies: devlop: 1.1.0 @@ -8169,11 +8785,23 @@ snapshots: unist-util-position-from-estree: 2.0.0 vfile-message: 4.0.3 + micromark-factory-space@1.1.0: + dependencies: + micromark-util-character: 1.2.0 + micromark-util-types: 1.1.0 + micromark-factory-space@2.0.1: dependencies: micromark-util-character: 2.1.1 micromark-util-types: 2.0.2 + micromark-factory-title@1.1.0: + dependencies: + micromark-factory-space: 1.1.0 + micromark-util-character: 1.2.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + micromark-factory-title@2.0.1: dependencies: micromark-factory-space: 2.0.1 @@ -8181,6 +8809,13 @@ snapshots: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + micromark-factory-whitespace@1.1.0: + dependencies: + micromark-factory-space: 1.1.0 + micromark-util-character: 1.2.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + micromark-factory-whitespace@2.0.1: dependencies: micromark-factory-space: 2.0.1 @@ -8188,30 +8823,61 @@ snapshots: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + micromark-util-character@1.2.0: + dependencies: + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + micromark-util-character@2.1.1: dependencies: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + 
micromark-util-chunked@1.1.0: + dependencies: + micromark-util-symbol: 1.1.0 + micromark-util-chunked@2.0.1: dependencies: micromark-util-symbol: 2.0.1 + micromark-util-classify-character@1.1.0: + dependencies: + micromark-util-character: 1.2.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + micromark-util-classify-character@2.0.1: dependencies: micromark-util-character: 2.1.1 micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + micromark-util-combine-extensions@1.1.0: + dependencies: + micromark-util-chunked: 1.1.0 + micromark-util-types: 1.1.0 + micromark-util-combine-extensions@2.0.1: dependencies: micromark-util-chunked: 2.0.1 micromark-util-types: 2.0.2 + micromark-util-decode-numeric-character-reference@1.1.0: + dependencies: + micromark-util-symbol: 1.1.0 + micromark-util-decode-numeric-character-reference@2.0.2: dependencies: micromark-util-symbol: 2.0.1 + micromark-util-decode-string@1.1.0: + dependencies: + decode-named-character-reference: 1.3.0 + micromark-util-character: 1.2.0 + micromark-util-decode-numeric-character-reference: 1.1.0 + micromark-util-symbol: 1.1.0 + micromark-util-decode-string@2.0.1: dependencies: decode-named-character-reference: 1.3.0 @@ -8219,6 +8885,8 @@ snapshots: micromark-util-decode-numeric-character-reference: 2.0.2 micromark-util-symbol: 2.0.1 + micromark-util-encode@1.1.0: {} + micromark-util-encode@2.0.1: {} micromark-util-events-to-acorn@2.0.3: @@ -8231,22 +8899,45 @@ snapshots: micromark-util-types: 2.0.2 vfile-message: 4.0.3 + micromark-util-html-tag-name@1.2.0: {} + micromark-util-html-tag-name@2.0.1: {} + micromark-util-normalize-identifier@1.1.0: + dependencies: + micromark-util-symbol: 1.1.0 + micromark-util-normalize-identifier@2.0.1: dependencies: micromark-util-symbol: 2.0.1 + micromark-util-resolve-all@1.1.0: + dependencies: + micromark-util-types: 1.1.0 + micromark-util-resolve-all@2.0.1: dependencies: micromark-util-types: 2.0.2 + micromark-util-sanitize-uri@1.2.0: + dependencies: + 
micromark-util-character: 1.2.0 + micromark-util-encode: 1.1.0 + micromark-util-symbol: 1.1.0 + micromark-util-sanitize-uri@2.0.1: dependencies: micromark-util-character: 2.1.1 micromark-util-encode: 2.0.1 micromark-util-symbol: 2.0.1 + micromark-util-subtokenize@1.1.0: + dependencies: + micromark-util-chunked: 1.1.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + uvu: 0.5.6 + micromark-util-subtokenize@2.1.0: dependencies: devlop: 1.1.0 @@ -8254,13 +8945,39 @@ snapshots: micromark-util-symbol: 2.0.1 micromark-util-types: 2.0.2 + micromark-util-symbol@1.1.0: {} + micromark-util-symbol@2.0.1: {} + micromark-util-types@1.1.0: {} + micromark-util-types@2.0.2: {} + micromark@3.2.0: + dependencies: + '@types/debug': 4.1.13 + debug: 4.4.3(supports-color@5.5.0) + decode-named-character-reference: 1.3.0 + micromark-core-commonmark: 1.1.0 + micromark-factory-space: 1.1.0 + micromark-util-character: 1.2.0 + micromark-util-chunked: 1.1.0 + micromark-util-combine-extensions: 1.1.0 + micromark-util-decode-numeric-character-reference: 1.1.0 + micromark-util-encode: 1.1.0 + micromark-util-normalize-identifier: 1.1.0 + micromark-util-resolve-all: 1.1.0 + micromark-util-sanitize-uri: 1.2.0 + micromark-util-subtokenize: 1.1.0 + micromark-util-symbol: 1.1.0 + micromark-util-types: 1.1.0 + uvu: 0.5.6 + transitivePeerDependencies: + - supports-color + micromark@4.0.2: dependencies: - '@types/debug': 4.1.12 + '@types/debug': 4.1.13 debug: 4.4.3(supports-color@5.5.0) decode-named-character-reference: 1.3.0 devlop: 1.1.0 @@ -8310,6 +9027,8 @@ snapshots: touch: 3.1.1 undefsafe: 2.0.5 + non-layered-tidy-tree-layout@2.0.2: {} + normalize-path@3.0.0: {} nprogress@0.2.0: {} @@ -8329,7 +9048,7 @@ snapshots: openapi-sampler@1.7.2: dependencies: '@types/json-schema': 7.0.15 - fast-xml-parser: 5.5.6 + fast-xml-parser: 5.5.9 json-pointer: 0.6.2 openapi-types@12.1.3: {} @@ -8441,7 +9160,7 @@ snapshots: path-exists@4.0.0: {} - path-expression-matcher@1.1.3: {} + 
path-expression-matcher@1.2.0: {} path-key@3.1.1: {} @@ -8590,13 +9309,13 @@ snapshots: react: 19.2.4 react-reconciler: 0.33.0(react@19.2.4) - react-router-dom@7.13.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4): + react-router-dom@7.13.2(react-dom@19.2.4(react@19.2.4))(react@19.2.4): dependencies: react: 19.2.4 react-dom: 19.2.4(react@19.2.4) - react-router: 7.13.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + react-router: 7.13.2(react-dom@19.2.4(react@19.2.4))(react@19.2.4) - react-router@7.13.1(react-dom@19.2.4(react@19.2.4))(react@19.2.4): + react-router@7.13.2(react-dom@19.2.4(react@19.2.4))(react@19.2.4): dependencies: cookie: 1.1.1 react: 19.2.4 @@ -8784,6 +9503,8 @@ snapshots: reusify@1.1.0: {} + robust-predicates@3.0.3: {} + rolldown-plugin-dts@0.22.5(rolldown@1.0.0-rc.9)(typescript@5.9.3): dependencies: '@babel/generator': 8.0.0-rc.2 @@ -8793,7 +9514,7 @@ snapshots: ast-kit: 3.0.0-beta.1 birpc: 4.0.0 dts-resolver: 2.1.3 - get-tsconfig: 4.13.6 + get-tsconfig: 4.13.7 obug: 2.1.1 rolldown: 1.0.0-rc.9 optionalDependencies: @@ -8822,41 +9543,47 @@ snapshots: '@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.9 '@rolldown/binding-win32-x64-msvc': 1.0.0-rc.9 - rollup@4.59.0: + rollup@4.60.0: dependencies: '@types/estree': 1.0.8 optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.59.0 - '@rollup/rollup-android-arm64': 4.59.0 - '@rollup/rollup-darwin-arm64': 4.59.0 - '@rollup/rollup-darwin-x64': 4.59.0 - '@rollup/rollup-freebsd-arm64': 4.59.0 - '@rollup/rollup-freebsd-x64': 4.59.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.59.0 - '@rollup/rollup-linux-arm-musleabihf': 4.59.0 - '@rollup/rollup-linux-arm64-gnu': 4.59.0 - '@rollup/rollup-linux-arm64-musl': 4.59.0 - '@rollup/rollup-linux-loong64-gnu': 4.59.0 - '@rollup/rollup-linux-loong64-musl': 4.59.0 - '@rollup/rollup-linux-ppc64-gnu': 4.59.0 - '@rollup/rollup-linux-ppc64-musl': 4.59.0 - '@rollup/rollup-linux-riscv64-gnu': 4.59.0 - '@rollup/rollup-linux-riscv64-musl': 4.59.0 - 
'@rollup/rollup-linux-s390x-gnu': 4.59.0 - '@rollup/rollup-linux-x64-gnu': 4.59.0 - '@rollup/rollup-linux-x64-musl': 4.59.0 - '@rollup/rollup-openbsd-x64': 4.59.0 - '@rollup/rollup-openharmony-arm64': 4.59.0 - '@rollup/rollup-win32-arm64-msvc': 4.59.0 - '@rollup/rollup-win32-ia32-msvc': 4.59.0 - '@rollup/rollup-win32-x64-gnu': 4.59.0 - '@rollup/rollup-win32-x64-msvc': 4.59.0 + '@rollup/rollup-android-arm-eabi': 4.60.0 + '@rollup/rollup-android-arm64': 4.60.0 + '@rollup/rollup-darwin-arm64': 4.60.0 + '@rollup/rollup-darwin-x64': 4.60.0 + '@rollup/rollup-freebsd-arm64': 4.60.0 + '@rollup/rollup-freebsd-x64': 4.60.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.60.0 + '@rollup/rollup-linux-arm-musleabihf': 4.60.0 + '@rollup/rollup-linux-arm64-gnu': 4.60.0 + '@rollup/rollup-linux-arm64-musl': 4.60.0 + '@rollup/rollup-linux-loong64-gnu': 4.60.0 + '@rollup/rollup-linux-loong64-musl': 4.60.0 + '@rollup/rollup-linux-ppc64-gnu': 4.60.0 + '@rollup/rollup-linux-ppc64-musl': 4.60.0 + '@rollup/rollup-linux-riscv64-gnu': 4.60.0 + '@rollup/rollup-linux-riscv64-musl': 4.60.0 + '@rollup/rollup-linux-s390x-gnu': 4.60.0 + '@rollup/rollup-linux-x64-gnu': 4.60.0 + '@rollup/rollup-linux-x64-musl': 4.60.0 + '@rollup/rollup-openbsd-x64': 4.60.0 + '@rollup/rollup-openharmony-arm64': 4.60.0 + '@rollup/rollup-win32-arm64-msvc': 4.60.0 + '@rollup/rollup-win32-ia32-msvc': 4.60.0 + '@rollup/rollup-win32-x64-gnu': 4.60.0 + '@rollup/rollup-win32-x64-msvc': 4.60.0 fsevents: 2.3.3 run-parallel@1.2.0: dependencies: queue-microtask: 1.2.3 + rw@1.3.3: {} + + sade@1.8.1: + dependencies: + mri: 1.2.0 + safe-regex@2.1.1: dependencies: regexp-tree: 0.1.27 @@ -8958,7 +9685,7 @@ snapshots: strip-bom@3.0.0: {} - strnum@2.2.0: {} + strnum@2.2.2: {} style-to-js@1.1.21: dependencies: @@ -8968,6 +9695,8 @@ snapshots: dependencies: inline-style-parser: 0.2.7 + stylis@4.3.6: {} + supports-color@5.5.0: dependencies: has-flag: 3.0.0 @@ -9016,7 +9745,7 @@ snapshots: trough@2.2.0: {} - 
ts-api-utils@2.4.0(typescript@5.9.3): + ts-api-utils@2.5.0(typescript@5.9.3): dependencies: typescript: 5.9.3 @@ -9025,6 +9754,8 @@ snapshots: picomatch: 4.0.3 typescript: 5.9.3 + ts-dedent@2.2.0: {} + ts-pattern@5.9.0: {} tsdown@0.21.3(typescript@5.9.3): @@ -9086,7 +9817,7 @@ snapshots: tsx@4.21.0: dependencies: esbuild: 0.27.4 - get-tsconfig: 4.13.6 + get-tsconfig: 4.13.7 optionalDependencies: fsevents: 2.3.3 @@ -9150,6 +9881,10 @@ snapshots: unist-util-is: 6.0.1 unist-util-visit-parents: 6.0.2 + unist-util-stringify-position@3.0.3: + dependencies: + '@types/unist': 2.0.11 + unist-util-stringify-position@4.0.0: dependencies: '@types/unist': 3.0.3 @@ -9185,6 +9920,15 @@ snapshots: dependencies: react: 19.2.4 + uuid@9.0.1: {} + + uvu@0.5.6: + dependencies: + dequal: 2.0.3 + diff: 5.2.2 + kleur: 4.1.5 + sade: 1.8.1 + vfile-location@5.0.3: dependencies: '@types/unist': 3.0.3 @@ -9200,25 +9944,25 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.3 - vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2): + vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3): dependencies: esbuild: 0.27.4 fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 postcss: 8.5.8 - rollup: 4.59.0 + rollup: 4.60.0 tinyglobby: 0.2.15 optionalDependencies: '@types/node': 25.5.0 fsevents: 2.3.3 jiti: 2.6.1 tsx: 4.21.0 - yaml: 2.8.2 + yaml: 2.8.3 - vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)): + vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)): dependencies: '@vitest/expect': 4.1.0 - '@vitest/mocker': 4.1.0(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.1.0(vite@7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3)) '@vitest/pretty-format': 4.1.0 '@vitest/runner': 4.1.0 '@vitest/snapshot': 4.1.0 @@ -9235,7 +9979,7 @@ snapshots: tinyexec: 1.0.4 tinyglobby: 0.2.15 
tinyrainbow: 3.1.0 - vite: 7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@25.5.0)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3) why-is-node-running: 2.3.0 optionalDependencies: '@opentelemetry/api': 1.9.0 @@ -9245,6 +9989,8 @@ snapshots: web-namespaces@2.0.1: {} + web-worker@1.5.0: {} + which@2.0.2: dependencies: isexe: 2.0.0 @@ -9264,7 +10010,7 @@ snapshots: y18n@5.0.8: {} - yaml@2.8.2: {} + yaml@2.8.3: {} yargs-parser@22.0.0: {} diff --git a/vercel.json b/vercel.json index 03a53cb..4e0b544 100644 --- a/vercel.json +++ b/vercel.json @@ -3,5 +3,5 @@ "outputDirectory": ".zpress/dist", "framework": null, "installCommand": "pnpm install", - "ignoreCommand": "git diff HEAD^ HEAD --quiet vercel.json zpress.config.ts packages/agents/docs/ packages/prompts/docs/ packages/cli/README.md contributing/" + "ignoreCommand": "pnpm zpress diff --ref HEAD^" } diff --git a/zpress.config.ts b/zpress.config.ts index a9f22c4..80bfa8f 100644 --- a/zpress.config.ts +++ b/zpress.config.ts @@ -8,274 +8,343 @@ export default defineConfig({ theme: { name: "arcade", }, + actions: [ + { + theme: "brand", + text: "Quick Start", + link: "/quick-start", + }, + { + theme: "alt", + text: "Introduction", + link: "/introduction", + }, + ], + features: [ + { + title: "Functions All the Way Down", + description: + "agent(), tool(), flowAgent() are plain functions returning composable objects. No classes, no decorators, no inheritance — just functions you can read top to bottom.", + icon: "mdi:lambda", + link: "/concepts/agents", + }, + { + title: "One API, Zero Workflows", + description: + "agent() for single-turn, flowAgent() for multi-step — same programming model, same hooks, same tools. No workflow DSL to learn, no orchestrator to configure. Just functions that compose.", + icon: "mdi:puzzle-outline", + link: "/concepts/flow-agents", + }, + { + title: "Type-Safe Prompts", + description: + "Write .prompt files with YAML frontmatter and LiquidJS templates. 
Build-time codegen produces fully typed TypeScript modules with Zod validation.", + icon: "mdi:file-code-outline", + link: "/concepts/prompts", + }, + ], packages: [ { title: "@funkai/agents", - description: "Lightweight workflow and agent orchestration framework", - icon: "pixelarticons:robot", - prefix: "/agents", - tags: [], - discovery: {}, + description: "Agent orchestration SDK", + icon: "mdi:robot-outline", + path: "/packages/agents", + }, + { + title: "@funkai/models", + description: "Model catalog and cost calculation", + icon: "mdi:currency-usd", + path: "/packages/models", }, { title: "@funkai/prompts", - description: "Prompt SDK with LiquidJS templating and Zod validation", - icon: "pixelarticons:message-text", - prefix: "/prompts", - tags: [], - discovery: {}, + description: "Prompt templating library", + icon: "mdi:message-text-outline", + path: "/packages/prompts", }, { title: "@funkai/cli", - description: "CLI for the funkai prompt SDK", - icon: "pixelarticons:terminal", - prefix: "/cli", - tags: [], - discovery: {}, + description: "Prompt CLI tooling", + icon: "mdi:console", + path: "/packages/cli", }, ], sections: [ // ── Getting Started ── { title: "Getting Started", - link: "/getting-started", - icon: "pixelarticons:speed-fast", - content: [ - "# Getting Started", - "", - "funkai is a lightweight, functional TypeScript framework for AI agent orchestration.", - "", - "## Packages", - "", - "| Package | Description |", - "| --- | --- |", - "| [`@funkai/agents`](/agents/) | Lightweight workflow and agent orchestration framework |", - "| [`@funkai/prompts`](/prompts/) | Prompt SDK with LiquidJS templating and Zod validation |", - "| [`@funkai/cli`](/cli/) | CLI for the funkai prompt SDK |", - "", - "## Quick Start", - "", - "```bash", - "pnpm add @funkai/agents", - "```", - "", - "Then check out the [Agents overview](/agents/) or the [Create an Agent guide](/agents/guides/create-agent).", - ].join("\n"), + icon: "mdi:rocket-launch-outline", + items: 
[ + { + title: "Introduction", + path: "/introduction", + include: "docs/introduction.md", + }, + { + title: "Quick Start", + path: "/quick-start", + include: "docs/quick-start.md", + }, + ], }, - // ── Agents ── + // ── Concepts ── { - title: "Agents", - icon: "pixelarticons:robot", - content: "Lightweight workflow and agent orchestration framework", + title: "Concepts", + icon: "mdi:lightbulb-outline", items: [ { - title: "Overview", - link: "/agents/", - from: "packages/agents/docs/overview.md", + title: "Agents", + path: "/concepts/agents", + include: "docs/concepts/agents.md", + }, + { + title: "Flow Agents", + path: "/concepts/flow-agents", + include: "docs/concepts/flow-agents.md", + }, + { + title: "Tools", + path: "/concepts/tools", + include: "docs/concepts/tools.md", + }, + { + title: "Prompts", + path: "/concepts/prompts", + include: "docs/concepts/prompts.md", + }, + { + title: "Models", + path: "/concepts/models", + include: "docs/concepts/models.md", + }, + ], + }, + + // ── Guides ── + { + title: "Guides", + icon: "mdi:book-open-page-variant-outline", + items: [ + { + title: "Streaming", + path: "/guides/streaming", + include: "packages/agents/docs/streaming.md", + }, + { + title: "Testing", + path: "/guides/testing", + include: "packages/agents/docs/test-agents.md", + }, + { + title: "Error Recovery", + path: "/guides/error-recovery", + include: "packages/agents/docs/error-recovery.md", + }, + { + title: "Multi-Agent Orchestration", + path: "/guides/multi-agent", + include: "packages/agents/docs/multi-agent-orchestration.md", + }, + { + title: "Cost Tracking", + path: "/guides/cost-tracking", + include: "packages/agents/docs/cost-tracking.md", }, + ], + }, + + // ── Reference ── + { + title: "Reference", + icon: "mdi:code-braces", + items: [ { - title: "Core", - prefix: "/agents/core", + title: "@funkai/agents", items: [ { - title: "Overview", - link: "/agents/core/overview", - from: "packages/agents/docs/core/overview.md", + title: "agent()", + 
path: "/reference/agents/agent", + include: "docs/reference/agent.md", }, { - title: "Agent", - link: "/agents/core/agent", - from: "packages/agents/docs/core/agent.md", + title: "flowAgent()", + path: "/reference/agents/flow-agent", + include: "docs/reference/flow-agent.md", }, { - title: "Workflow", - link: "/agents/core/workflow", - from: "packages/agents/docs/core/workflow.md", + title: "tool()", + path: "/reference/agents/tool", + include: "docs/reference/tool.md", + }, + ], + }, + { + title: "@funkai/models", + items: [ + { + title: "model()", + path: "/reference/models/model", + include: "docs/reference/model.md", }, { - title: "Step", - link: "/agents/core/step", - from: "packages/agents/docs/core/step.md", + title: "models()", + path: "/reference/models/models", + include: "docs/reference/models.md", }, { - title: "Tools", - link: "/agents/core/tools", - from: "packages/agents/docs/core/tools.md", + title: "createProviderRegistry()", + path: "/reference/models/provider-registry", + include: "docs/reference/provider-registry.md", }, { - title: "Hooks", - link: "/agents/core/hooks", - from: "packages/agents/docs/core/hooks.md", + title: "calculateCost()", + path: "/reference/models/calculate-cost", + include: "docs/reference/calculate-cost.md", }, ], }, { - title: "Guides", - prefix: "/agents/guides", - from: "packages/agents/docs/guides/*.md", - titleFrom: "heading", - sort: "alpha", - }, - { - title: "Provider", - prefix: "/agents/provider", + title: "@funkai/prompts", items: [ { - title: "Overview", - link: "/agents/provider/overview", - from: "packages/agents/docs/provider/overview.md", + title: "createPrompt()", + path: "/reference/prompts/create-prompt", + include: "docs/reference/create-prompt.md", }, { - title: "Models", - link: "/agents/provider/models", - from: "packages/agents/docs/provider/models.md", + title: "createPromptGroup()", + path: "/reference/prompts/create-prompt-group", + include: "docs/reference/create-prompt-group.md", }, { - title: 
"Usage", - link: "/agents/provider/usage", - from: "packages/agents/docs/provider/usage.md", + title: "createPromptRegistry()", + path: "/reference/prompts/create-prompt-registry", + include: "docs/reference/create-prompt-registry.md", + }, + { + title: "CLI", + path: "/reference/prompts/cli", + include: "docs/reference/prompts-cli.md", }, ], }, - { - title: "Troubleshooting", - link: "/agents/troubleshooting", - from: "packages/agents/docs/troubleshooting.md", - }, ], }, - // ── Prompts ── + // ── Packages (standalone — READMEs with nested Changelogs) ── { - title: "Prompts", - icon: "pixelarticons:message-text", - frontmatter: { - description: "Prompt SDK with LiquidJS templating and Zod validation", - }, + title: "Packages", + icon: "mdi:package-variant-closed", + standalone: true, items: [ { - title: "Overview", - link: "/prompts/", - from: "packages/prompts/docs/overview.md", - }, - { - title: "File Format", - prefix: "/prompts/file-format", + title: "@funkai/agents", + path: "/packages/agents", + include: "packages/agents/README.md", items: [ { - title: "Overview", - link: "/prompts/file-format/overview", - from: "packages/prompts/docs/file-format/overview.md", - }, - { - title: "Frontmatter", - link: "/prompts/file-format/frontmatter", - from: "packages/prompts/docs/file-format/frontmatter.md", - }, - { - title: "Partials", - link: "/prompts/file-format/partials", - from: "packages/prompts/docs/file-format/partials.md", + title: "Changelog", + path: "/packages/agents/changelog", + include: "packages/agents/CHANGELOG.md", }, ], }, { - title: "CLI", - prefix: "/prompts/cli", + title: "@funkai/models", + path: "/packages/models", + include: "packages/models/README.md", items: [ { - title: "Overview", - link: "/prompts/cli/overview", - from: "packages/prompts/docs/cli/overview.md", - }, - { - title: "Commands", - link: "/prompts/cli/commands", - from: "packages/prompts/docs/cli/commands.md", + title: "Changelog", + path: "/packages/models/changelog", + include: 
"packages/models/CHANGELOG.md", }, ], }, { - title: "Code Generation", - link: "/prompts/codegen/overview", - from: "packages/prompts/docs/codegen/overview.md", - }, - { - title: "Library", - link: "/prompts/library/overview", - from: "packages/prompts/docs/library/overview.md", - }, - { - title: "Guides", - prefix: "/prompts/guides", - from: "packages/prompts/docs/guides/*.md", - titleFrom: "heading", - sort: "alpha", + title: "@funkai/prompts", + path: "/packages/prompts", + include: "packages/prompts/README.md", + items: [ + { + title: "Changelog", + path: "/packages/prompts/changelog", + include: "packages/prompts/CHANGELOG.md", + }, + ], }, { - title: "Troubleshooting", - link: "/prompts/troubleshooting", - from: "packages/prompts/docs/troubleshooting.md", + title: "@funkai/cli", + path: "/packages/cli", + include: "packages/cli/README.md", + items: [ + { + title: "Changelog", + path: "/packages/cli/changelog", + include: "packages/cli/CHANGELOG.md", + }, + ], }, ], }, - // ── CLI ── + // ── Examples ── { - title: "CLI", - icon: "pixelarticons:terminal", - link: "/cli/", - from: "packages/cli/README.md", + title: "Examples", + icon: "mdi:file-document-outline", + items: [ + { + title: "Real-World CLI", + path: "/examples/realworld-cli", + include: "examples/realworld-cli/README.md", + }, + ], }, // ── Contributing ── { title: "Contributing", - icon: "pixelarticons:git-merge", - isolated: true, + icon: "mdi:source-merge", + standalone: true, + hidden: true, items: [ { title: "Overview", - link: "/contributing/overview", - from: "contributing/README.md", + path: "/contributing/overview", + include: "contributing/README.md", }, { - title: "Concepts", - prefix: "/contributing/concepts", - from: "contributing/concepts/*.md", - titleFrom: "heading", + title: { from: "heading" }, + path: "/contributing/concepts", + include: "contributing/concepts/*.md", sort: "alpha", }, { - title: "Guides", - prefix: "/contributing/guides", - from: "contributing/guides/*.md", - 
titleFrom: "heading", + title: { from: "heading" }, + path: "/contributing/guides", + include: "contributing/guides/*.md", sort: "alpha", }, { title: "Standards", items: [ { - title: "TypeScript", - prefix: "/contributing/standards/typescript", - from: "contributing/standards/typescript/*.md", - titleFrom: "heading", + title: { from: "heading" }, + path: "/contributing/standards/typescript", + include: "contributing/standards/typescript/*.md", sort: "alpha", }, { - title: "Documentation", - prefix: "/contributing/standards/documentation", - from: "contributing/standards/documentation/*.md", - titleFrom: "heading", + title: { from: "heading" }, + path: "/contributing/standards/documentation", + include: "contributing/standards/documentation/*.md", sort: "alpha", }, { - title: "Git", - prefix: "/contributing/standards/git", - from: "contributing/standards/git-*.md", - titleFrom: "heading", + title: { from: "heading" }, + path: "/contributing/standards/git", + include: "contributing/standards/git-*.md", sort: "alpha", }, ],