diff --git a/README.md b/README.md
index b26e308..7760341 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,18 @@
-# Bingsan
+
+
+
-[](https://golang.org)
-[](LICENSE)
-[](https://goreportcard.com/report/github.com/teamPaprika/bingsan)
+Bingsan
-**High-performance Apache Iceberg REST Catalog in Go — 2-3x faster than alternatives**
+
+
+
+
+
+
+
+ High-performance Apache Iceberg REST Catalog in Go — 2-3x faster than alternatives
+
[한국어](#한국어) | [English](#english)
diff --git a/assets/bingsan-logo.png b/assets/bingsan-logo.png
new file mode 100644
index 0000000..b1b90b6
Binary files /dev/null and b/assets/bingsan-logo.png differ
diff --git a/cmd/iceberg-catalog/main.go b/cmd/iceberg-catalog/main.go
index 2dc30af..14b8e56 100644
--- a/cmd/iceberg-catalog/main.go
+++ b/cmd/iceberg-catalog/main.go
@@ -7,9 +7,9 @@ import (
"os/signal"
"syscall"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
)
func main() {
diff --git a/docs-site/.github/workflows/deploy-docs.yml b/docs-site/.github/workflows/deploy-docs.yml
new file mode 100644
index 0000000..0c2b26a
--- /dev/null
+++ b/docs-site/.github/workflows/deploy-docs.yml
@@ -0,0 +1,60 @@
+name: Deploy Documentation
+
+on:
+ push:
+ branches: [main]
+ paths:
+ - 'docs-site/**'
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+concurrency:
+ group: 'pages'
+ cancel-in-progress: true
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: docs-site
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 9
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 20
+ cache: 'pnpm'
+ cache-dependency-path: docs-site/pnpm-lock.yaml
+
+ - name: Install dependencies
+ run: pnpm install --frozen-lockfile
+
+ - name: Build
+ run: pnpm build
+ env:
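+          # GitHub Pages serves this project under /bingsan; this base path is assumed to feed basePath in next.config.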
+ NEXT_PUBLIC_BASE_PATH: /bingsan
+
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ path: docs-site/out
+
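+  # Publishes the artifact produced by the build job to the github-pages environment.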
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ needs: build
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
diff --git a/docs-site/.gitignore b/docs-site/.gitignore
deleted file mode 100644
index 39d7bb1..0000000
--- a/docs-site/.gitignore
+++ /dev/null
@@ -1,16 +0,0 @@
-# Hugo build output
-public/
-
-# Hugo cache
-resources/_gen/
-.hugo_build.lock
-
-# OS files
-.DS_Store
-Thumbs.db
-
-# IDE
-.idea/
-.vscode/
-*.swp
-*.swo
diff --git a/docs-site/README.md b/docs-site/README.md
deleted file mode 100644
index e768f4e..0000000
--- a/docs-site/README.md
+++ /dev/null
@@ -1,309 +0,0 @@
-# Bingsan Documentation
-
-This directory contains the Hugo-based documentation site for Bingsan.
-
-## Prerequisites
-
-- [Hugo](https://gohugo.io/) v0.110.0 or later (extended edition recommended)
-- Git (for theme submodule)
-
-### Install Hugo
-
-**macOS (Homebrew):**
-```bash
-brew install hugo
-```
-
-**Linux (Snap):**
-```bash
-snap install hugo
-```
-
-**Windows (Chocolatey):**
-```bash
-choco install hugo-extended
-```
-
-## Local Development
-
-### Clone with Submodules
-
-If cloning fresh, include submodules:
-```bash
-git clone --recurse-submodules https://github.com/kimuyb/bingsan.git
-cd bingsan/docs-site
-```
-
-Or initialize submodules after cloning:
-```bash
-git submodule update --init --recursive
-```
-
-### Start Development Server
-
-```bash
-hugo server -D
-```
-
-The site will be available at `http://localhost:1313/`.
-
-Options:
-- `-D` includes draft content
-- `--bind 0.0.0.0` to access from other devices
-- `-p 8080` to use a different port
-
-### Build for Production
-
-```bash
-hugo --minify
-```
-
-The static site will be generated in the `public/` directory.
-
-## Project Structure
-
-```
-docs-site/
-├── archetypes/ # Templates for new content
-├── content/
-│ ├── _index.md # Homepage
-│ └── docs/ # Documentation pages
-│ ├── getting-started/
-│ ├── api/
-│ ├── configuration/
-│ ├── architecture/
-│ └── deployment/
-├── static/ # Static assets (images, etc.)
-├── themes/
-│ └── hugo-book/ # Theme (git submodule)
-├── hugo.toml # Site configuration
-└── README.md # This file
-```
-
-## Adding Content
-
-### New Documentation Page
-
-```bash
-hugo new docs/section-name/page-name.md
-```
-
-### Page Front Matter
-
-```yaml
----
-title: "Page Title"
-weight: 1 # Controls ordering in sidebar
-bookCollapseSection: true # Collapse child pages in sidebar
----
-```
-
-### Content Guidelines
-
-1. Use relative links: `{{< relref "/docs/api/tables" >}}`
-2. Use markdown alerts for callouts: `> [!WARNING]` or `> [!NOTE]`
-3. Include code examples with proper syntax highlighting
-4. Keep paragraphs concise
-
-## Deployment
-
-### GitHub Pages
-
-1. Create `.github/workflows/hugo.yml`:
-
-```yaml
-name: Deploy Hugo site to Pages
-
-on:
- push:
- branches: ["main"]
- workflow_dispatch:
-
-permissions:
- contents: read
- pages: write
- id-token: write
-
-concurrency:
- group: "pages"
- cancel-in-progress: false
-
-defaults:
- run:
- shell: bash
-
-jobs:
- build:
- runs-on: ubuntu-latest
- env:
- HUGO_VERSION: 0.153.0
- steps:
- - name: Install Hugo CLI
- run: |
- wget -O ${{ runner.temp }}/hugo.deb https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-amd64.deb \
- && sudo dpkg -i ${{ runner.temp }}/hugo.deb
- - name: Checkout
- uses: actions/checkout@v4
- with:
- submodules: recursive
- - name: Setup Pages
- id: pages
- uses: actions/configure-pages@v4
- - name: Build with Hugo
- working-directory: docs-site
- env:
- HUGO_ENVIRONMENT: production
- HUGO_ENV: production
- run: |
- hugo \
- --minify \
- --baseURL "${{ steps.pages.outputs.base_url }}/"
- - name: Upload artifact
- uses: actions/upload-pages-artifact@v3
- with:
- path: ./docs-site/public
-
- deploy:
- environment:
- name: github-pages
- url: ${{ steps.deployment.outputs.page_url }}
- runs-on: ubuntu-latest
- needs: build
- steps:
- - name: Deploy to GitHub Pages
- id: deployment
- uses: actions/deploy-pages@v4
-```
-
-2. Enable GitHub Pages in repository settings (Settings > Pages > Source: GitHub Actions)
-
-### Netlify
-
-1. Create `netlify.toml` in the docs-site directory:
-
-```toml
-[build]
- command = "hugo --minify"
- publish = "public"
-
-[build.environment]
- HUGO_VERSION = "0.153.0"
- HUGO_ENV = "production"
-
-[context.production.environment]
- HUGO_BASEURL = "https://your-site.netlify.app/"
-
-[context.deploy-preview]
- command = "hugo --minify --buildDrafts --buildFuture -b $DEPLOY_PRIME_URL"
-
-[context.branch-deploy]
- command = "hugo --minify -b $DEPLOY_PRIME_URL"
-```
-
-2. Connect your repository to Netlify
-
-### Vercel
-
-1. Create `vercel.json`:
-
-```json
-{
- "build": {
- "env": {
- "HUGO_VERSION": "0.153.0"
- }
- },
- "installCommand": "yum install -y wget && wget -O /tmp/hugo.tar.gz https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_Linux-64bit.tar.gz && tar -xzf /tmp/hugo.tar.gz -C /usr/local/bin",
- "buildCommand": "hugo --minify",
- "outputDirectory": "public"
-}
-```
-
-### Docker
-
-Build a Docker image for the documentation:
-
-```dockerfile
-FROM klakegg/hugo:0.153.0-ext-alpine AS builder
-WORKDIR /src
-COPY . .
-RUN hugo --minify
-
-FROM nginx:alpine
-COPY --from=builder /src/public /usr/share/nginx/html
-EXPOSE 80
-```
-
-Build and run:
-```bash
-docker build -t bingsan-docs .
-docker run -p 8080:80 bingsan-docs
-```
-
-### Self-Hosted (Nginx)
-
-1. Build the site:
-```bash
-hugo --minify
-```
-
-2. Copy `public/` to your web server:
-```bash
-rsync -avz public/ user@server:/var/www/docs/
-```
-
-3. Configure Nginx:
-```nginx
-server {
- listen 80;
- server_name docs.bingsan.dev;
- root /var/www/docs;
-
- location / {
- try_files $uri $uri/ =404;
- }
-
- error_page 404 /404.html;
-}
-```
-
-## Theme
-
-This site uses the [Hugo Book](https://github.com/alex-shpak/hugo-book) theme. It's included as a Git submodule.
-
-### Update Theme
-
-```bash
-cd themes/hugo-book
-git pull origin master
-cd ../..
-git add themes/hugo-book
-git commit -m "Update hugo-book theme"
-```
-
-## Troubleshooting
-
-### Theme Not Found
-
-```bash
-git submodule update --init --recursive
-```
-
-### Build Fails with Git Error
-
-If you see "Failed to read Git log", disable Git info:
-
-```toml
-# hugo.toml
-enableGitInfo = false
-```
-
-### Content Not Appearing
-
-- Check front matter format (YAML between `---`)
-- Ensure `draft: true` is removed or build with `-D` flag
-- Verify file is in `content/` directory
-
-## License
-
-Documentation is licensed under Apache License 2.0, same as Bingsan itself.
diff --git a/docs-site/app/[lang]/docs/[[...slug]]/page.tsx b/docs-site/app/[lang]/docs/[[...slug]]/page.tsx
new file mode 100644
index 0000000..8b047a0
--- /dev/null
+++ b/docs-site/app/[lang]/docs/[[...slug]]/page.tsx
@@ -0,0 +1,58 @@
+import { source } from '@/lib/source';
+import {
+ DocsPage,
+ DocsBody,
+ DocsTitle,
+ DocsDescription,
+} from 'fumadocs-ui/page';
+import { notFound } from 'next/navigation';
+import defaultMdxComponents from 'fumadocs-ui/mdx';
+import { type Locale, i18n } from '@/lib/i18n';
+
+export default async function Page({
+ params,
+}: {
+ params: Promise<{ lang: Locale; slug?: string[] }>;
+}) {
+ const { lang, slug } = await params;
+ const page = source.getPage(slug, lang);
+
+ if (!page) notFound();
+
+ const MDX = page.data.body;
+
+ return (
+    <DocsPage toc={page.data.toc} full={page.data.full}>
+      <DocsTitle>{page.data.title}</DocsTitle>
+      <DocsDescription>{page.data.description}</DocsDescription>
+      <DocsBody>
+        <MDX components={defaultMdxComponents} />
+      </DocsBody>
+    </DocsPage>
+ );
+}
+
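+// Pre-render every docs slug for every locale; the Pages workflow publishes the resulting static output from docs-site/out.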
+export function generateStaticParams() {
+ return source.generateParams().flatMap((params) =>
+ i18n.languages.map((lang) => ({
+ ...params,
+ lang,
+ }))
+ );
+}
+
+export async function generateMetadata({
+ params,
+}: {
+ params: Promise<{ lang: Locale; slug?: string[] }>;
+}) {
+ const { lang, slug } = await params;
+ const page = source.getPage(slug, lang);
+
+ if (!page) notFound();
+
+ return {
+ title: page.data.title,
+ description: page.data.description,
+ };
+}
diff --git a/docs-site/app/[lang]/docs/layout.tsx b/docs-site/app/[lang]/docs/layout.tsx
new file mode 100644
index 0000000..b9d75d5
--- /dev/null
+++ b/docs-site/app/[lang]/docs/layout.tsx
@@ -0,0 +1,38 @@
+import { source } from '@/lib/source';
+import { DocsLayout } from 'fumadocs-ui/layouts/docs';
+import type { ReactNode } from 'react';
+import { type Locale, i18n } from '@/lib/i18n';
+import Image from 'next/image';
+
+export default async function Layout({
+ params,
+ children,
+}: {
+ params: Promise<{ lang: Locale }>;
+ children: ReactNode;
+}) {
+ const { lang } = await params;
+
+ return (
+    // The tree prop and the nav logo follow the standard fumadocs i18n layout; the logo path and size are assumptions.
+    <DocsLayout
+      tree={source.pageTree[lang]}
+      nav={{
+        title: (
+          <>
+            <Image src="/logo.png" alt="Bingsan" width={24} height={24} />
+            Bingsan
+          </>
+        ),
+      }}
+      i18n={i18n}
+    >
+      {children}
+    </DocsLayout>
+ );
+}
diff --git a/docs-site/app/[lang]/layout.tsx b/docs-site/app/[lang]/layout.tsx
new file mode 100644
index 0000000..ab057c9
--- /dev/null
+++ b/docs-site/app/[lang]/layout.tsx
@@ -0,0 +1,37 @@
+import { RootProvider } from 'fumadocs-ui/provider/next';
+import { defineI18nUI } from 'fumadocs-ui/i18n';
+import type { ReactNode } from 'react';
+import { i18n, type Locale } from '@/lib/i18n';
+
+const { provider } = defineI18nUI(i18n, {
+ translations: {
+ en: {
+ displayName: 'English',
+ },
+ ko: {
+ displayName: '한국어',
+ search: '문서 검색...',
+ toc: '이 페이지의 내용',
+ },
+ },
+});
+
+export function generateStaticParams() {
+ return i18n.languages.map((lang) => ({ lang }));
+}
+
+export default async function LangLayout({
+ params,
+ children,
+}: {
+ params: Promise<{ lang: Locale }>;
+ children: ReactNode;
+}) {
+ const { lang } = await params;
+
+ return (
+    <RootProvider i18n={provider(lang)}>
+      {children}
+    </RootProvider>
+ );
+}
diff --git a/docs-site/app/[lang]/page.tsx b/docs-site/app/[lang]/page.tsx
new file mode 100644
index 0000000..bfceb8d
--- /dev/null
+++ b/docs-site/app/[lang]/page.tsx
@@ -0,0 +1,119 @@
+import Link from 'next/link';
+import Image from 'next/image';
+import { type Locale, i18n } from '@/lib/i18n';
+
+const content: Record<Locale, {
+  title: string;
+  subtitle: string;
+  description: string;
+  getStarted: string;
+  apiReference: string;
+  features: { title: string; description: string }[];
+}> = {
+ en: {
+ title: 'Bingsan',
+ subtitle: 'High-Performance Apache Iceberg REST Catalog',
+ description:
+ 'A production-ready Iceberg REST catalog implementation in Go, designed for performance and scalability.',
+ getStarted: 'Get Started',
+ apiReference: 'API Reference',
+ features: [
+ {
+ title: 'High Performance',
+ description: 'Built with Go for maximum throughput and minimal latency',
+ },
+ {
+ title: 'Iceberg Compatible',
+ description: 'Full REST catalog API compliance with Spark, Trino, and PyIceberg',
+ },
+ {
+ title: 'Production Ready',
+ description: 'PostgreSQL backend with connection pooling and metrics',
+ },
+ ],
+ },
+ ko: {
+ title: 'Bingsan',
+ subtitle: '고성능 Apache Iceberg REST 카탈로그',
+ description:
+ 'Go로 구현된 프로덕션 레디 Iceberg REST 카탈로그입니다. 성능과 확장성을 위해 설계되었습니다.',
+ getStarted: '시작하기',
+ apiReference: 'API 레퍼런스',
+ features: [
+ {
+ title: '고성능',
+ description: '최대 처리량과 최소 지연 시간을 위해 Go로 구축',
+ },
+ {
+ title: 'Iceberg 호환',
+ description: 'Spark, Trino, PyIceberg와 완벽한 REST 카탈로그 API 호환',
+ },
+ {
+ title: '프로덕션 레디',
+ description: '커넥션 풀링과 메트릭이 포함된 PostgreSQL 백엔드',
+ },
+ ],
+ },
+};
+
+export function generateStaticParams() {
+ return i18n.languages.map((lang) => ({ lang }));
+}
+
+export default async function HomePage({
+ params,
+}: {
+ params: Promise<{ lang: Locale }>;
+}) {
+ const { lang } = await params;
+ const t = content[lang] ?? content.en;
+
+ return (
+    // Layout classes, the logo path, and the two link targets below are assumptions.
+    <main className="flex flex-col items-center px-4 py-16 text-center">
+      <Image src="/logo.png" alt="Bingsan logo" width={120} height={120} priority />
+      <h1 className="mt-6 text-4xl font-bold">{t.title}</h1>
+      <p className="mt-2 text-xl">{t.subtitle}</p>
+      <p className="mt-4 max-w-2xl text-fd-muted-foreground">{t.description}</p>
+      <div className="mt-8 flex gap-4">
+        <Link href={`/${lang}/docs`} className="rounded-md bg-fd-primary px-4 py-2 font-medium text-fd-primary-foreground">
+          {t.getStarted}
+        </Link>
+        <Link href={`/${lang}/docs/api`} className="rounded-md border px-4 py-2 font-medium">
+          {t.apiReference}
+        </Link>
+      </div>
+      <div className="mt-16 grid w-full max-w-4xl gap-6 sm:grid-cols-3">
+        {t.features.map((feature, index) => (
+          <div key={index} className="rounded-lg border p-6 text-left">
+            <h3 className="font-semibold">{feature.title}</h3>
+            <p className="mt-2 text-sm text-fd-muted-foreground">{feature.description}</p>
+          </div>
+        ))}
+      </div>
+    </main>
+ );
+}
diff --git a/docs-site/app/global.css b/docs-site/app/global.css
new file mode 100644
index 0000000..2537461
--- /dev/null
+++ b/docs-site/app/global.css
@@ -0,0 +1,6 @@
+@import 'tailwindcss';
+@import 'fumadocs-ui/css/neutral.css';
+@import 'fumadocs-ui/css/preset.css';
+
+/* Source the fumadocs-ui package for Tailwind v4 */
+@source '../node_modules/fumadocs-ui/dist/**/*.js';
diff --git a/docs-site/app/layout.tsx b/docs-site/app/layout.tsx
new file mode 100644
index 0000000..751c9d2
--- /dev/null
+++ b/docs-site/app/layout.tsx
@@ -0,0 +1,32 @@
+import './global.css';
+import type { ReactNode } from 'react';
+import type { Metadata } from 'next';
+
+export const metadata: Metadata = {
+ title: {
+ default: 'Bingsan Documentation',
+ template: '%s | Bingsan',
+ },
+ description: 'High-performance Apache Iceberg REST Catalog documentation',
+ keywords: ['Apache Iceberg', 'REST Catalog', 'Data Lake', 'Go', 'PostgreSQL', 'Spark', 'Trino', 'PyIceberg'],
+ authors: [{ name: 'Bingsan Team' }],
+ openGraph: {
+ title: 'Bingsan Documentation',
+ description: 'High-performance Apache Iceberg REST Catalog implemented in Go',
+ type: 'website',
+ locale: 'en_US',
+ },
+ icons: {
+ icon: '/favicon.svg',
+ },
+};
+
+export default function RootLayout({ children }: { children: ReactNode }) {
+ return (
+    <html lang="en" suppressHydrationWarning>
+      <body>
+        {children}
+      </body>
+    </html>
+ );
+}
diff --git a/docs-site/app/page.tsx b/docs-site/app/page.tsx
new file mode 100644
index 0000000..915fdd8
--- /dev/null
+++ b/docs-site/app/page.tsx
@@ -0,0 +1,5 @@
+import { redirect } from 'next/navigation';
+
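+// The bare root has no locale segment, so send visitors to the default English locale.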
+export default function RootPage() {
+ redirect('/en');
+}
diff --git a/docs-site/archetypes/default.md b/docs-site/archetypes/default.md
deleted file mode 100644
index 25b6752..0000000
--- a/docs-site/archetypes/default.md
+++ /dev/null
@@ -1,5 +0,0 @@
-+++
-date = '{{ .Date }}'
-draft = true
-title = '{{ replace .File.ContentBaseName "-" " " | title }}'
-+++
diff --git a/docs-site/content/_index.md b/docs-site/content/_index.md
deleted file mode 100644
index 79737a4..0000000
--- a/docs-site/content/_index.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-title: "Bingsan - Apache Iceberg REST Catalog"
-type: docs
----
-
-# Bingsan
-
-**Bingsan** is a high-performance Apache Iceberg REST Catalog implemented in Go. It uses PostgreSQL as the metadata store and leverages Go's concurrency model to provide high throughput and low latency.
-
-## Key Features
-
-- **Complete Iceberg REST API**: Compliant with Apache Iceberg REST Catalog OpenAPI spec
-- **PostgreSQL Backend**: Reliable metadata storage with ACID transactions
-- **Go Concurrency**: Efficient request handling using goroutines
-- **Real-time Event Streaming**: Receive catalog change events via WebSocket
-- **Prometheus Metrics**: Metrics endpoint for operational monitoring
-- **Kubernetes-Friendly**: Supports single-node or multi-node cluster deployments
-
-## Quick Links
-
-- **[Getting Started]({{< relref "/docs/getting-started/quick-start" >}})** - Get up and running quickly with Bingsan
-- **[API Reference]({{< relref "/docs/api" >}})** - Complete API documentation for all endpoints
-- **[Configuration Guide]({{< relref "/docs/configuration" >}})** - Learn how to configure Bingsan for your environment
-
-## Architecture
-
-```
-┌─────────────────────────────────────────────────────────────┐
-│ Clients │
-│ (Spark, Trino, Flink, PyIceberg, etc.) │
-└─────────────────────────┬───────────────────────────────────┘
- │ REST API
-┌─────────────────────────▼───────────────────────────────────┐
-│ Bingsan Cluster │
-│ ┌─────────┐ ┌─────────┐ ┌─────────┐ │
-│ │ Node 1 │ │ Node 2 │ │ Node N │ (Kubernetes Pods) │
-│ │ :8181 │ │ :8181 │ │ :8181 │ │
-│ └────┬────┘ └────┬────┘ └────┬────┘ │
-│ └────────────┼────────────┘ │
-└────────────────────┼────────────────────────────────────────┘
- │
- ┌────────────┴────────────┐
- │ │
-┌───────▼───────┐ ┌────────▼────────┐
-│ PostgreSQL │ │ S3 / GCS │
-│ (Metadata) │ │ (Data Lake) │
-└───────────────┘ └─────────────────┘
-```
-
-## Compatibility
-
-Bingsan is compatible with:
-
-- Apache Spark (Iceberg Spark runtime)
-- Trino / Presto
-- Apache Flink
-- PyIceberg
-- Dremio
-- StarRocks
-
-It is designed as a drop-in replacement for existing Java REST catalogs like Apache Polaris, Gravitino, Nessie, and Unity Catalog.
-
-## License
-
-Apache License 2.0
diff --git a/docs-site/content/docs/_index.md b/docs-site/content/docs/_index.md
deleted file mode 100644
index 52a8a57..0000000
--- a/docs-site/content/docs/_index.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: "Documentation"
-weight: 1
-bookFlatSection: true
----
-
-# Bingsan Documentation
-
-Welcome to the Bingsan documentation. This guide covers everything you need to know about deploying and using Bingsan as your Apache Iceberg REST Catalog.
-
-## What is Bingsan?
-
-Bingsan is a production-grade Apache Iceberg REST Catalog implementation in Go. It provides:
-
-- Full compliance with the Apache Iceberg REST Catalog OpenAPI specification
-- High-performance metadata operations using Go's concurrency model
-- PostgreSQL-backed metadata storage with ACID guarantees
-- Real-time event streaming via WebSocket
-- Cloud-native deployment support (Kubernetes, Docker)
-
-## Documentation Sections
-
-### [Getting Started]({{< relref "/docs/getting-started" >}})
-Installation, prerequisites, and quick start guide.
-
-### [API Reference]({{< relref "/docs/api" >}})
-Complete documentation of all REST API endpoints.
-
-### [Configuration]({{< relref "/docs/configuration" >}})
-Server, database, storage, and authentication configuration options.
-
-### [Architecture]({{< relref "/docs/architecture" >}})
-System design, components, and scalability considerations.
-
-### [Performance]({{< relref "/docs/performance" >}})
-Object pooling, distributed locking, benchmarking, and tuning guides.
-
-### [Deployment]({{< relref "/docs/deployment" >}})
-Production deployment guides for Docker and Kubernetes.
diff --git a/docs-site/content/docs/api/configuration.md b/docs-site/content/docs/api/configuration.md
deleted file mode 100644
index b89a4bb..0000000
--- a/docs-site/content/docs/api/configuration.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: "Configuration Endpoint"
-weight: 1
----
-
-# Configuration Endpoint
-
-Retrieve catalog configuration and defaults.
-
-## Get Configuration
-
-Returns the catalog's configuration properties, including storage defaults and feature flags.
-
-### Request
-
-```http
-GET /v1/config
-```
-
-### Response
-
-```json
-{
- "defaults": {
- "warehouse": "s3://my-bucket/warehouse"
- },
- "overrides": {}
-}
-```
-
-### Response Fields
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `defaults` | object | Default configuration properties |
-| `defaults.warehouse` | string | Default warehouse location |
-| `overrides` | object | Properties that override client settings |
-
-### Example
-
-```bash
-curl http://localhost:8181/v1/config
-```
-
-### Usage
-
-Clients typically call this endpoint during initialization to discover:
-
-- Default warehouse location for new tables
-- Required configuration overrides
-- Catalog capabilities
-
-### Notes
-
-- This endpoint does not require authentication (even when auth is enabled)
-- The response varies based on server configuration
-- Clients should merge `defaults` with their local config, then apply `overrides`
diff --git a/docs-site/content/docs/api/events.md b/docs-site/content/docs/api/events.md
deleted file mode 100644
index 57bb952..0000000
--- a/docs-site/content/docs/api/events.md
+++ /dev/null
@@ -1,383 +0,0 @@
----
-title: "Events"
-weight: 7
----
-
-# Events API
-
-Bingsan provides real-time event streaming via WebSocket, allowing clients to receive notifications about catalog changes as they happen.
-
-## Event Stream
-
-Connect to the event stream to receive real-time catalog events.
-
-### Endpoint
-
-```
-WebSocket: ws://localhost:8181/v1/events/stream
-```
-
-### Query Parameters
-
-| Parameter | Type | Required | Description |
-|-----------|------|----------|-------------|
-| `token` | string | Yes* | Authentication token (required when auth is enabled) |
-| `namespace` | string | No | Filter events to a specific namespace |
-
-### Connection Example
-
-Using `wscat`:
-
-```bash
-# Connect to event stream (no auth)
-wscat -c "ws://localhost:8181/v1/events/stream"
-
-# Connect with authentication
-wscat -c "ws://localhost:8181/v1/events/stream?token=YOUR_API_KEY"
-
-# Connect with namespace filter
-wscat -c "ws://localhost:8181/v1/events/stream?token=YOUR_API_KEY&namespace=analytics"
-```
-
-Using JavaScript:
-
-```javascript
-const ws = new WebSocket('ws://localhost:8181/v1/events/stream?token=YOUR_TOKEN');
-
-ws.onopen = () => {
- console.log('Connected to event stream');
-};
-
-ws.onmessage = (event) => {
- const data = JSON.parse(event.data);
- console.log('Received event:', data);
-};
-
-ws.onerror = (error) => {
- console.error('WebSocket error:', error);
-};
-
-ws.onclose = () => {
- console.log('Disconnected from event stream');
-};
-```
-
-Using Python:
-
-```python
-import websocket
-import json
-
-def on_message(ws, message):
- event = json.loads(message)
- print(f"Event: {event['type']} - {event}")
-
-def on_error(ws, error):
- print(f"Error: {error}")
-
-def on_close(ws, close_status_code, close_msg):
- print("Connection closed")
-
-def on_open(ws):
- print("Connected to event stream")
-
-ws = websocket.WebSocketApp(
- "ws://localhost:8181/v1/events/stream?token=YOUR_TOKEN",
- on_open=on_open,
- on_message=on_message,
- on_error=on_error,
- on_close=on_close
-)
-
-ws.run_forever()
-```
-
----
-
-## Event Types
-
-### Namespace Events
-
-#### namespace_created
-
-Emitted when a new namespace is created.
-
-```json
-{
- "type": "namespace_created",
- "timestamp": "2024-01-15T10:30:00.000Z",
- "namespace": ["analytics"],
- "properties": {
- "owner": "data-team"
- }
-}
-```
-
-#### namespace_updated
-
-Emitted when namespace properties are updated.
-
-```json
-{
- "type": "namespace_updated",
- "timestamp": "2024-01-15T10:35:00.000Z",
- "namespace": ["analytics"],
- "updates": {
- "owner": "platform-team"
- },
- "removals": ["description"]
-}
-```
-
-#### namespace_deleted
-
-Emitted when a namespace is deleted.
-
-```json
-{
- "type": "namespace_deleted",
- "timestamp": "2024-01-15T10:40:00.000Z",
- "namespace": ["analytics"]
-}
-```
-
-### Table Events
-
-#### table_created
-
-Emitted when a new table is created.
-
-```json
-{
- "type": "table_created",
- "timestamp": "2024-01-15T10:30:00.000Z",
- "namespace": ["analytics"],
- "table": "events",
- "table_uuid": "550e8400-e29b-41d4-a716-446655440000",
- "metadata_location": "s3://bucket/metadata/00000.metadata.json"
-}
-```
-
-#### table_updated
-
-Emitted when a table is updated (commit).
-
-```json
-{
- "type": "table_updated",
- "timestamp": "2024-01-15T10:35:00.000Z",
- "namespace": ["analytics"],
- "table": "events",
- "table_uuid": "550e8400-e29b-41d4-a716-446655440000",
- "previous_metadata_location": "s3://bucket/metadata/00000.metadata.json",
- "metadata_location": "s3://bucket/metadata/00001.metadata.json",
- "updates": ["add-snapshot", "set-snapshot-ref"]
-}
-```
-
-#### table_dropped
-
-Emitted when a table is dropped.
-
-```json
-{
- "type": "table_dropped",
- "timestamp": "2024-01-15T10:40:00.000Z",
- "namespace": ["analytics"],
- "table": "events",
- "table_uuid": "550e8400-e29b-41d4-a716-446655440000",
- "purge_requested": false
-}
-```
-
-#### table_renamed
-
-Emitted when a table is renamed.
-
-```json
-{
- "type": "table_renamed",
- "timestamp": "2024-01-15T10:45:00.000Z",
- "source_namespace": ["analytics"],
- "source_table": "old_name",
- "destination_namespace": ["analytics"],
- "destination_table": "new_name",
- "table_uuid": "550e8400-e29b-41d4-a716-446655440000"
-}
-```
-
-### View Events
-
-#### view_created
-
-```json
-{
- "type": "view_created",
- "timestamp": "2024-01-15T10:30:00.000Z",
- "namespace": ["analytics"],
- "view": "daily_summary",
- "view_uuid": "660e8400-e29b-41d4-a716-446655440001",
- "metadata_location": "s3://bucket/views/metadata/00000.metadata.json"
-}
-```
-
-#### view_updated
-
-```json
-{
- "type": "view_updated",
- "timestamp": "2024-01-15T10:35:00.000Z",
- "namespace": ["analytics"],
- "view": "daily_summary",
- "view_uuid": "660e8400-e29b-41d4-a716-446655440001",
- "previous_version_id": 1,
- "version_id": 2
-}
-```
-
-#### view_dropped
-
-```json
-{
- "type": "view_dropped",
- "timestamp": "2024-01-15T10:40:00.000Z",
- "namespace": ["analytics"],
- "view": "daily_summary",
- "view_uuid": "660e8400-e29b-41d4-a716-446655440001"
-}
-```
-
-#### view_renamed
-
-```json
-{
- "type": "view_renamed",
- "timestamp": "2024-01-15T10:45:00.000Z",
- "source_namespace": ["analytics"],
- "source_view": "old_view",
- "destination_namespace": ["analytics"],
- "destination_view": "new_view",
- "view_uuid": "660e8400-e29b-41d4-a716-446655440001"
-}
-```
-
-### Transaction Events
-
-#### transaction_committed
-
-```json
-{
- "type": "transaction_committed",
- "timestamp": "2024-01-15T10:30:00.000Z",
- "commit_id": "txn-770e8400-e29b-41d4-a716-446655440002",
- "tables_updated": [
- {"namespace": ["analytics"], "name": "events"},
- {"namespace": ["analytics"], "name": "aggregates"}
- ]
-}
-```
-
----
-
-## Event Schema
-
-All events share a common structure:
-
-```json
-{
- "type": "event_type",
- "timestamp": "ISO-8601 timestamp",
- ...event-specific fields
-}
-```
-
-### Common Fields
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `type` | string | Event type identifier |
-| `timestamp` | string | ISO-8601 timestamp when event occurred |
-
----
-
-## Connection Management
-
-### Heartbeat
-
-The server sends periodic ping frames to keep the connection alive. Clients should respond with pong frames.
-
-### Reconnection
-
-Clients should implement reconnection logic with exponential backoff:
-
-```python
-import time
-import websocket
-
-def connect_with_retry():
- max_retries = 10
- base_delay = 1 # seconds
-
- for attempt in range(max_retries):
- try:
- ws = websocket.create_connection(
- "ws://localhost:8181/v1/events/stream"
- )
- return ws
- except Exception as e:
- delay = base_delay * (2 ** attempt)
- print(f"Connection failed, retrying in {delay}s: {e}")
- time.sleep(delay)
-
- raise Exception("Failed to connect after max retries")
-```
-
-### Connection Limits
-
-| Limit | Value | Description |
-|-------|-------|-------------|
-| Max connections per IP | 100 | Maximum concurrent connections |
-| Idle timeout | 5 minutes | Connection closed if no activity |
-| Max message size | 64KB | Maximum event message size |
-
----
-
-## Use Cases
-
-### Cache Invalidation
-
-Invalidate local caches when tables change:
-
-```python
-def on_message(ws, message):
- event = json.loads(message)
- if event['type'] in ['table_updated', 'table_dropped']:
- cache_key = f"{event['namespace']}.{event['table']}"
- cache.invalidate(cache_key)
-```
-
-### Audit Logging
-
-Stream events to an audit log:
-
-```python
-def on_message(ws, message):
- event = json.loads(message)
- audit_log.append({
- 'timestamp': event['timestamp'],
- 'event_type': event['type'],
- 'details': event
- })
-```
-
-### Change Data Capture
-
-Trigger downstream processing when data changes:
-
-```python
-def on_message(ws, message):
- event = json.loads(message)
- if event['type'] == 'table_updated':
- if 'add-snapshot' in event.get('updates', []):
- trigger_etl_pipeline(event['namespace'], event['table'])
-```
diff --git a/docs-site/content/docs/api/health-metrics.md b/docs-site/content/docs/api/health-metrics.md
deleted file mode 100644
index 8842fe2..0000000
--- a/docs-site/content/docs/api/health-metrics.md
+++ /dev/null
@@ -1,331 +0,0 @@
----
-title: "Health & Metrics"
-weight: 8
----
-
-# Health & Metrics API
-
-Bingsan provides health check and Prometheus metrics endpoints for operational monitoring.
-
-## Health Check
-
-Simple health check endpoint for load balancers.
-
-### Request
-
-```http
-GET /health
-```
-
-### Response
-
-```json
-{"status": "ok"}
-```
-
-### HTTP Status Codes
-
-| Code | Description |
-|------|-------------|
-| 200 | Server is healthy |
-| 503 | Server is unhealthy |
-
-### Example
-
-```bash
-curl http://localhost:8181/health
-```
-
----
-
-## Readiness Check
-
-Readiness check including database connectivity.
-
-### Request
-
-```http
-GET /ready
-```
-
-### Response (Ready)
-
-```json
-{
- "status": "ready",
- "database": "connected"
-}
-```
-
-### Response (Not Ready)
-
-```json
-{
- "status": "not_ready",
- "database": "disconnected",
- "error": "connection refused"
-}
-```
-
-### HTTP Status Codes
-
-| Code | Description |
-|------|-------------|
-| 200 | Server is ready to accept requests |
-| 503 | Server is not ready (database unavailable) |
-
-### Example
-
-```bash
-curl http://localhost:8181/ready
-```
-
-### Kubernetes Usage
-
-Configure liveness and readiness probes:
-
-```yaml
-livenessProbe:
- httpGet:
- path: /health
- port: 8181
- initialDelaySeconds: 10
- periodSeconds: 10
-
-readinessProbe:
- httpGet:
- path: /ready
- port: 8181
- initialDelaySeconds: 5
- periodSeconds: 5
-```
-
----
-
-## Prometheus Metrics
-
-Expose metrics in Prometheus format.
-
-### Request
-
-```http
-GET /metrics
-```
-
-### Response
-
-```
-# HELP iceberg_catalog_http_requests_total Total number of HTTP requests
-# TYPE iceberg_catalog_http_requests_total counter
-iceberg_catalog_http_requests_total{method="GET",path="/v1/namespaces",status="200"} 1234
-
-# HELP iceberg_namespaces_total Total number of namespaces
-# TYPE iceberg_namespaces_total gauge
-iceberg_namespaces_total 15
-
-# HELP iceberg_tables_total Total number of tables per namespace
-# TYPE iceberg_tables_total gauge
-iceberg_tables_total{namespace="analytics"} 42
-iceberg_tables_total{namespace="raw"} 18
-
-# HELP iceberg_db_connections_open Current open database connections
-# TYPE iceberg_db_connections_open gauge
-iceberg_db_connections_open 5
-
-# HELP iceberg_db_connections_in_use Current in-use database connections
-# TYPE iceberg_db_connections_in_use gauge
-iceberg_db_connections_in_use 2
-```
-
-### Example
-
-```bash
-curl http://localhost:8181/metrics
-```
-
----
-
-## Available Metrics
-
-### HTTP Metrics
-
-| Metric | Type | Labels | Description |
-|--------|------|--------|-------------|
-| `iceberg_catalog_http_requests_total` | counter | method, path, status | Total HTTP requests |
-| `iceberg_catalog_http_request_duration_seconds` | histogram | method, path | Request duration |
-| `iceberg_catalog_http_requests_in_flight` | gauge | - | Current in-flight requests |
-| `iceberg_catalog_http_request_size_bytes` | histogram | method, path | Request body size |
-| `iceberg_catalog_http_response_size_bytes` | histogram | method, path | Response body size |
-
-### Catalog Metrics
-
-| Metric | Type | Labels | Description |
-|--------|------|--------|-------------|
-| `iceberg_namespaces_total` | gauge | - | Total namespaces |
-| `iceberg_tables_total` | gauge | namespace | Tables per namespace |
-| `iceberg_views_total` | gauge | namespace | Views per namespace |
-| `iceberg_commits_total` | counter | namespace, table | Table commits |
-| `iceberg_scan_plans_total` | counter | status | Scan plans by status |
-| `iceberg_transactions_total` | counter | status | Multi-table transactions |
-
-### Database Metrics
-
-| Metric | Type | Labels | Description |
-|--------|------|--------|-------------|
-| `iceberg_db_connections_open` | gauge | - | Open connections |
-| `iceberg_db_connections_in_use` | gauge | - | In-use connections |
-| `iceberg_db_connections_idle` | gauge | - | Idle connections |
-| `iceberg_db_connections_max` | gauge | - | Max connections |
-| `iceberg_db_wait_count_total` | counter | - | Connection wait count |
-| `iceberg_db_wait_duration_seconds_total` | counter | - | Connection wait time |
-
-### Event Metrics
-
-| Metric | Type | Labels | Description |
-|--------|------|--------|-------------|
-| `iceberg_events_published_total` | counter | type | Events published |
-| `iceberg_websocket_connections` | gauge | - | Active WebSocket connections |
-
----
-
-## Prometheus Configuration
-
-Add Bingsan to your Prometheus configuration:
-
-```yaml
-scrape_configs:
- - job_name: 'bingsan'
- static_configs:
- - targets: ['localhost:8181']
- metrics_path: /metrics
- scrape_interval: 15s
-```
-
-### Service Discovery (Kubernetes)
-
-```yaml
-scrape_configs:
- - job_name: 'bingsan'
- kubernetes_sd_configs:
- - role: pod
- relabel_configs:
- - source_labels: [__meta_kubernetes_pod_label_app]
- action: keep
- regex: bingsan
- - source_labels: [__meta_kubernetes_pod_container_port_number]
- action: keep
- regex: "8181"
-```
-
----
-
-## Grafana Dashboard
-
-### Key Panels
-
-#### Request Rate
-
-```promql
-sum(rate(iceberg_catalog_http_requests_total[5m])) by (method, status)
-```
-
-#### Request Latency (p99)
-
-```promql
-histogram_quantile(0.99, sum(rate(iceberg_catalog_http_request_duration_seconds_bucket[5m])) by (le, path))
-```
-
-#### Error Rate
-
-```promql
-sum(rate(iceberg_catalog_http_requests_total{status=~"5.."}[5m]))
-/
-sum(rate(iceberg_catalog_http_requests_total[5m]))
-```
-
-#### Database Connection Pool
-
-```promql
-iceberg_db_connections_in_use / iceberg_db_connections_max
-```
-
-#### Namespaces and Tables
-
-```promql
-iceberg_namespaces_total
-sum(iceberg_tables_total)
-```
-
-### Example Dashboard JSON
-
-Import this into Grafana:
-
-```json
-{
- "dashboard": {
- "title": "Bingsan Overview",
- "panels": [
- {
- "title": "Request Rate",
- "type": "graph",
- "targets": [
- {
- "expr": "sum(rate(iceberg_catalog_http_requests_total[5m])) by (status)",
- "legendFormat": "{{status}}"
- }
- ]
- },
- {
- "title": "Latency p99",
- "type": "graph",
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum(rate(iceberg_catalog_http_request_duration_seconds_bucket[5m])) by (le))",
- "legendFormat": "p99"
- }
- ]
- }
- ]
- }
-}
-```
-
----
-
-## Alerting Rules
-
-Example Prometheus alerting rules:
-
-```yaml
-groups:
- - name: bingsan
- rules:
- - alert: BingsanHighErrorRate
- expr: |
- sum(rate(iceberg_catalog_http_requests_total{status=~"5.."}[5m]))
- / sum(rate(iceberg_catalog_http_requests_total[5m])) > 0.05
- for: 5m
- labels:
- severity: critical
- annotations:
- summary: "High error rate in Bingsan"
- description: "Error rate is {{ $value | humanizePercentage }}"
-
- - alert: BingsanDatabaseConnectionPoolExhausted
- expr: iceberg_db_connections_in_use / iceberg_db_connections_max > 0.9
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: "Database connection pool nearly exhausted"
-
- - alert: BingsanHighLatency
- expr: |
- histogram_quantile(0.99, sum(rate(iceberg_catalog_http_request_duration_seconds_bucket[5m])) by (le)) > 1
- for: 5m
- labels:
- severity: warning
- annotations:
- summary: "High request latency in Bingsan"
- description: "p99 latency is {{ $value }}s"
-```
diff --git a/docs-site/content/docs/api/oauth.md b/docs-site/content/docs/api/oauth.md
deleted file mode 100644
index f349601..0000000
--- a/docs-site/content/docs/api/oauth.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-title: "OAuth"
-weight: 9
----
-
-# OAuth API
-
-Bingsan supports OAuth2 token exchange for client authentication, compatible with the Iceberg REST Catalog specification.
-
-## Token Exchange
-
-Exchange credentials for an access token.
-
-### Request
-
-```http
-POST /v1/oauth/tokens
-Content-Type: application/x-www-form-urlencoded
-```
-
-### Form Parameters
-
-| Parameter | Type | Required | Description |
-|-----------|------|----------|-------------|
-| `grant_type` | string | Yes | Must be `client_credentials` |
-| `client_id` | string | Yes | Client identifier |
-| `client_secret` | string | Yes | Client secret |
-| `scope` | string | No | Requested scope (e.g., `catalog`) |
-
-### Response
-
-```json
-{
- "access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
- "token_type": "bearer",
- "expires_in": 3600,
- "scope": "catalog"
-}
-```
-
-### Response Fields
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `access_token` | string | Bearer token for API requests |
-| `token_type` | string | Always `bearer` |
-| `expires_in` | integer | Token lifetime in seconds |
-| `scope` | string | Granted scope |
-
-### Errors
-
-| Code | Error | Description |
-|------|-------|-------------|
-| 400 | `invalid_request` | Missing required parameters |
-| 401 | `invalid_client` | Invalid client credentials |
-| 403 | `access_denied` | Client not authorized |
-
-### Example
-
-```bash
-curl -X POST http://localhost:8181/v1/oauth/tokens \
- -H "Content-Type: application/x-www-form-urlencoded" \
- -d "grant_type=client_credentials" \
- -d "client_id=my-client" \
- -d "client_secret=my-secret" \
- -d "scope=catalog"
-```
-
----
-
-## Using Access Tokens
-
-Include the access token in API requests:
-
-```bash
-curl -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." \
- http://localhost:8181/v1/namespaces
-```
-
----
-
-## Token Refresh
-
-When a token expires, request a new one using the same credentials. Bingsan does not currently support refresh tokens.
-
----
-
-## Client Configuration
-
-### Spark
-
-```properties
-spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog
-spark.sql.catalog.my_catalog.type=rest
-spark.sql.catalog.my_catalog.uri=http://localhost:8181
-spark.sql.catalog.my_catalog.credential=client_id:client_secret
-```
-
-### PyIceberg
-
-```python
-from pyiceberg.catalog import load_catalog
-
-catalog = load_catalog(
- "rest",
- uri="http://localhost:8181",
- credential="client_id:client_secret"
-)
-```
-
-### Trino
-
-```properties
-connector.name=iceberg
-iceberg.catalog.type=rest
-iceberg.rest-catalog.uri=http://localhost:8181
-iceberg.rest-catalog.security=OAUTH2
-iceberg.rest-catalog.oauth2.client-id=client_id
-iceberg.rest-catalog.oauth2.client-secret=client_secret
-```
-
----
-
-## Configuration
-
-Enable OAuth in `config.yaml`:
-
-```yaml
-auth:
- enabled: true
- token_expiry: 1h
- signing_key: "your-secure-signing-key-change-in-production"
-
- oauth2:
- enabled: true
- # Optional: Use external OAuth provider
- issuer: ""
- client_id: ""
- client_secret: ""
-```
-
-### Environment Variables
-
-```bash
-ICEBERG_AUTH_ENABLED=true
-ICEBERG_AUTH_TOKEN_EXPIRY=1h
-ICEBERG_AUTH_SIGNING_KEY=your-secure-signing-key
-ICEBERG_AUTH_OAUTH2_ENABLED=true
-```
-
----
-
-## Security Considerations
-
-### Signing Key
-
-- Use a strong, random signing key (at least 256 bits)
-- Store the key securely (e.g., Kubernetes secrets, vault)
-- Rotate keys periodically
-
-### Token Expiry
-
-- Default: 1 hour
-- Shorter expiry improves security but increases token exchange frequency
-- Consider your client's tolerance for re-authentication
-
-### HTTPS
-
-Always use HTTPS in production to protect credentials and tokens in transit.
-
-```yaml
-# Behind a TLS-terminating proxy
-server:
- host: 0.0.0.0
- port: 8181
-```
-
----
-
-## External OAuth Providers
-
-Bingsan can validate tokens from external OAuth providers (OIDC):
-
-```yaml
-auth:
- enabled: true
- oauth2:
- enabled: true
- issuer: "https://your-idp.example.com"
- # Leave client_id/client_secret empty when using external issuer
-```
-
-Tokens from the external provider are validated against the issuer's JWKS endpoint.
-
-### Supported Providers
-
-- Auth0
-- Okta
-- Keycloak
-- Azure AD
-- Google Identity Platform
-- Any OIDC-compliant provider
diff --git a/docs-site/content/docs/api/scan-planning.md b/docs-site/content/docs/api/scan-planning.md
deleted file mode 100644
index 512668b..0000000
--- a/docs-site/content/docs/api/scan-planning.md
+++ /dev/null
@@ -1,396 +0,0 @@
----
-title: "Scan Planning"
-weight: 5
----
-
-# Scan Planning API
-
-Server-side scan planning allows the catalog to optimize query planning by filtering manifests and data files based on predicates.
-
-## Overview
-
-Scan planning follows an asynchronous workflow:
-
-1. **Submit** a scan plan request
-2. **Poll** for plan completion
-3. **Fetch** scan tasks containing file references
-4. Optionally **cancel** an in-progress plan
-
-```
-┌─────────┐ Submit Plan ┌─────────┐ Poll Status ┌─────────┐
-│ Client │ ──────────────────▶│ Server │◀─────────────────▶│ Client │
-└─────────┘ └─────────┘ └─────────┘
- │
- │ Planning Complete
- ▼
-┌─────────┐ Fetch Tasks ┌─────────┐
-│ Client │◀──────────────────│ Server │
-└─────────┘ └─────────┘
-```
-
----
-
-## Submit Scan Plan
-
-Submit a new scan plan request.
-
-### Request
-
-```http
-POST /v1/namespaces/{namespace}/tables/{table}/plan
-```
-
-### Request Body
-
-```json
-{
- "snapshot-id": 123456789,
- "select": ["event_id", "user_id", "event_time"],
- "filter": {
- "type": "and",
- "left": {
- "type": "gte",
- "term": "event_time",
- "value": "2024-01-01T00:00:00Z"
- },
- "right": {
- "type": "lt",
- "term": "event_time",
- "value": "2024-02-01T00:00:00Z"
- }
- },
- "case-sensitive": true,
- "use-snapshot-schema": false,
- "start-snapshot-id": null,
- "end-snapshot-id": null
-}
-```
-
-### Request Fields
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `snapshot-id` | long | No | Snapshot to scan (latest if omitted) |
-| `select` | array[string] | No | Columns to project |
-| `filter` | object | No | Filter expression |
-| `case-sensitive` | boolean | No | Case-sensitive column matching (default: true) |
-| `use-snapshot-schema` | boolean | No | Use snapshot's schema vs current |
-| `start-snapshot-id` | long | No | For incremental scans |
-| `end-snapshot-id` | long | No | For incremental scans |
-
-### Filter Expression Types
-
-#### Comparison Operations
-
-```json
-{"type": "eq", "term": "column", "value": "value"}
-{"type": "neq", "term": "column", "value": "value"}
-{"type": "lt", "term": "column", "value": 100}
-{"type": "lte", "term": "column", "value": 100}
-{"type": "gt", "term": "column", "value": 100}
-{"type": "gte", "term": "column", "value": 100}
-```
-
-#### Logical Operations
-
-```json
-{"type": "and", "left": {...}, "right": {...}}
-{"type": "or", "left": {...}, "right": {...}}
-{"type": "not", "child": {...}}
-```
-
-#### Set Operations
-
-```json
-{"type": "in", "term": "column", "values": [1, 2, 3]}
-{"type": "not-in", "term": "column", "values": [1, 2, 3]}
-```
-
-#### Null Checks
-
-```json
-{"type": "is-null", "term": "column"}
-{"type": "not-null", "term": "column"}
-```
-
-#### String Operations
-
-```json
-{"type": "starts-with", "term": "column", "value": "prefix"}
-{"type": "not-starts-with", "term": "column", "value": "prefix"}
-```
-
-### Response
-
-```json
-{
- "plan-id": "plan-550e8400-e29b-41d4-a716-446655440000",
- "status": "submitted"
-}
-```
-
-### Example
-
-```bash
-curl -X POST http://localhost:8181/v1/namespaces/analytics/tables/events/plan \
- -H "Content-Type: application/json" \
- -d '{
- "select": ["event_id", "user_id"],
- "filter": {"type": "eq", "term": "event_type", "value": "click"}
- }'
-```
-
----
-
-## Get Scan Plan Status
-
-Poll for scan plan completion.
-
-### Request
-
-```http
-GET /v1/namespaces/{namespace}/tables/{table}/plan/{plan-id}
-```
-
-### Response (In Progress)
-
-```json
-{
- "plan-id": "plan-550e8400-e29b-41d4-a716-446655440000",
- "status": "planning",
- "progress": {
- "manifests-scanned": 50,
- "manifests-total": 100,
- "data-files-matched": 1250
- }
-}
-```
-
-### Response (Complete)
-
-```json
-{
- "plan-id": "plan-550e8400-e29b-41d4-a716-446655440000",
- "status": "complete",
- "statistics": {
- "manifests-scanned": 100,
- "manifests-skipped": 45,
- "data-files-matched": 2500,
- "data-files-skipped": 15000,
- "total-file-size-bytes": 10737418240,
- "planning-duration-ms": 1250
- }
-}
-```
-
-### Status Values
-
-| Status | Description |
-|--------|-------------|
-| `submitted` | Plan request received |
-| `planning` | Scanning manifests and files |
-| `complete` | Planning finished, ready to fetch tasks |
-| `failed` | Planning encountered an error |
-| `cancelled` | Plan was cancelled |
-
-### Errors
-
-| Code | Error | Description |
-|------|-------|-------------|
-| 404 | `NoSuchPlanException` | Plan ID not found |
-
-### Example
-
-```bash
-curl http://localhost:8181/v1/namespaces/analytics/tables/events/plan/plan-550e8400
-```
-
----
-
-## Fetch Plan Tasks
-
-Retrieve scan tasks for a completed plan.
-
-### Request
-
-```http
-POST /v1/namespaces/{namespace}/tables/{table}/tasks
-```
-
-### Request Body
-
-```json
-{
- "plan-id": "plan-550e8400-e29b-41d4-a716-446655440000",
- "page-token": null,
- "page-size": 100
-}
-```
-
-### Request Fields
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `plan-id` | string | Yes | Plan ID from submit response |
-| `page-token` | string | No | Pagination token |
-| `page-size` | integer | No | Tasks per page (default: 100, max: 1000) |
-
-### Response
-
-```json
-{
- "tasks": [
- {
- "task-id": "task-001",
- "data-files": [
- {
- "content": "data",
- "file-path": "s3://bucket/data/part-00001.parquet",
- "file-format": "parquet",
- "file-size-in-bytes": 52428800,
- "record-count": 100000,
- "partition": {"event_day": "2024-01-15"},
- "key-metadata": null,
- "split-offsets": [0, 26214400],
- "sort-order-id": 0
- }
- ],
- "delete-files": [
- {
- "content": "position-deletes",
- "file-path": "s3://bucket/data/delete-00001.parquet",
- "file-format": "parquet",
- "file-size-in-bytes": 1048576,
- "record-count": 500
- }
- ],
- "residual-filter": {"type": "eq", "term": "event_type", "value": "click"}
- }
- ],
- "next-page-token": "eyJvZmZzZXQiOjEwMH0="
-}
-```
-
-### Task Fields
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `task-id` | string | Unique task identifier |
-| `data-files` | array | Data files to scan |
-| `delete-files` | array | Associated delete files |
-| `residual-filter` | object | Filter to apply during scan |
-
-### File Entry Fields
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `content` | string | `data`, `position-deletes`, or `equality-deletes` |
-| `file-path` | string | Storage path |
-| `file-format` | string | `parquet`, `avro`, `orc` |
-| `file-size-in-bytes` | long | File size |
-| `record-count` | long | Number of records |
-| `partition` | object | Partition values |
-| `split-offsets` | array[long] | Offsets for parallel reads |
-
-### Errors
-
-| Code | Error | Description |
-|------|-------|-------------|
-| 400 | `PlanNotCompleteException` | Plan still in progress |
-| 404 | `NoSuchPlanException` | Plan ID not found |
-
-### Example
-
-```bash
-curl -X POST http://localhost:8181/v1/namespaces/analytics/tables/events/tasks \
- -H "Content-Type: application/json" \
- -d '{"plan-id": "plan-550e8400-e29b-41d4-a716-446655440000"}'
-```
-
----
-
-## Cancel Scan Plan
-
-Cancel an in-progress scan plan.
-
-### Request
-
-```http
-DELETE /v1/namespaces/{namespace}/tables/{table}/plan/{plan-id}
-```
-
-### Response
-
-- **204 No Content**: Plan cancelled
-
-### Errors
-
-| Code | Error | Description |
-|------|-------|-------------|
-| 404 | `NoSuchPlanException` | Plan ID not found |
-
-### Example
-
-```bash
-curl -X DELETE http://localhost:8181/v1/namespaces/analytics/tables/events/plan/plan-550e8400
-```
-
----
-
-## Best Practices
-
-### Use Projections
-
-Always specify `select` to reduce data transfer:
-
-```json
-{
- "select": ["user_id", "event_time"]
-}
-```
-
-### Push Down Filters
-
-Provide filters to skip irrelevant partitions:
-
-```json
-{
- "filter": {
- "type": "and",
- "left": {"type": "gte", "term": "event_date", "value": "2024-01-01"},
- "right": {"type": "lt", "term": "event_date", "value": "2024-01-08"}
- }
-}
-```
-
-### Handle Pagination
-
-Large scans may return many tasks:
-
-```python
-page_token = None
-while True:
- response = fetch_tasks(plan_id, page_token)
- process_tasks(response['tasks'])
- page_token = response.get('next-page-token')
- if not page_token:
- break
-```
-
-### Poll Efficiently
-
-Use exponential backoff when polling:
-
-```python
-import time
-
-wait = 0.1 # Start with 100ms
-max_wait = 5.0
-
-while True:
- status = get_plan_status(plan_id)
- if status['status'] == 'complete':
- break
- time.sleep(wait)
- wait = min(wait * 2, max_wait)
-```
diff --git a/docs-site/content/docs/api/transactions.md b/docs-site/content/docs/api/transactions.md
deleted file mode 100644
index 4ef8b30..0000000
--- a/docs-site/content/docs/api/transactions.md
+++ /dev/null
@@ -1,327 +0,0 @@
----
-title: "Transactions"
-weight: 6
----
-
-# Transactions API
-
-The transactions API enables atomic multi-table commits, ensuring all-or-nothing semantics across multiple table updates.
-
-## Overview
-
-Multi-table transactions are useful for:
-
-- Maintaining referential integrity across tables
-- Coordinated schema changes
-- Atomic data pipelines that write to multiple tables
-- ETL workflows with multiple output tables
-
-## Commit Transaction
-
-Atomically commit updates to multiple tables.
-
-### Request
-
-```http
-POST /v1/transactions/commit
-```
-
-### Request Body
-
-```json
-{
- "table-changes": [
- {
- "identifier": {
- "namespace": ["analytics"],
- "name": "events"
- },
- "requirements": [
- {"type": "assert-table-uuid", "uuid": "550e8400-e29b-41d4-a716-446655440000"},
- {"type": "assert-current-schema-id", "current-schema-id": 0}
- ],
- "updates": [
- {
- "action": "add-snapshot",
- "snapshot": {
- "snapshot-id": 123456790,
- "parent-snapshot-id": 123456789,
- "timestamp-ms": 1705398600000,
- "summary": {
- "operation": "append",
- "added-data-files": "5",
- "added-records": "10000"
- },
- "manifest-list": "s3://bucket/metadata/snap-123456790.avro",
- "schema-id": 0
- }
- },
- {"action": "set-snapshot-ref", "ref-name": "main", "type": "branch", "snapshot-id": 123456790}
- ]
- },
- {
- "identifier": {
- "namespace": ["analytics"],
- "name": "event_counts"
- },
- "requirements": [
- {"type": "assert-table-uuid", "uuid": "660e8400-e29b-41d4-a716-446655440001"}
- ],
- "updates": [
- {
- "action": "add-snapshot",
- "snapshot": {
- "snapshot-id": 987654322,
- "parent-snapshot-id": 987654321,
- "timestamp-ms": 1705398600000,
- "summary": {
- "operation": "overwrite",
- "added-data-files": "1",
- "added-records": "100"
- },
- "manifest-list": "s3://bucket/metadata/snap-987654322.avro",
- "schema-id": 0
- }
- },
- {"action": "set-snapshot-ref", "ref-name": "main", "type": "branch", "snapshot-id": 987654322}
- ]
- }
- ]
-}
-```
-
-### Request Fields
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `table-changes` | array | Yes | List of table updates to commit atomically |
-
-Each table change contains:
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `identifier` | object | Yes | Table identifier (namespace + name) |
-| `requirements` | array | Yes | Preconditions that must be met |
-| `updates` | array | Yes | Updates to apply |
-
-### Requirements
-
-All requirements from the [Tables API]({{< relref "/docs/api/tables" >}}) are supported:
-
-| Type | Fields | Description |
-|------|--------|-------------|
-| `assert-create` | - | Table must not exist |
-| `assert-table-uuid` | `uuid` | UUID must match |
-| `assert-ref-snapshot-id` | `ref`, `snapshot-id` | Ref must point to snapshot |
-| `assert-last-assigned-field-id` | `last-assigned-field-id` | Last field ID must match |
-| `assert-current-schema-id` | `current-schema-id` | Schema ID must match |
-| `assert-last-assigned-partition-id` | `last-assigned-partition-id` | Partition ID must match |
-| `assert-default-spec-id` | `default-spec-id` | Default spec must match |
-| `assert-default-sort-order-id` | `default-sort-order-id` | Sort order must match |
-
-### Updates
-
-All update actions from the [Tables API]({{< relref "/docs/api/tables" >}}) are supported:
-
-| Action | Description |
-|--------|-------------|
-| `assign-uuid` | Assign table UUID |
-| `upgrade-format-version` | Upgrade format version |
-| `add-schema` | Add new schema |
-| `set-current-schema` | Set current schema |
-| `add-partition-spec` | Add partition spec |
-| `set-default-spec` | Set default partition spec |
-| `add-sort-order` | Add sort order |
-| `set-default-sort-order` | Set default sort order |
-| `add-snapshot` | Add snapshot |
-| `set-snapshot-ref` | Set snapshot reference |
-| `remove-snapshots` | Remove snapshots |
-| `remove-snapshot-ref` | Remove snapshot reference |
-| `set-location` | Set table location |
-| `set-properties` | Update properties |
-| `remove-properties` | Remove properties |
-
-### Response (Success)
-
-```json
-{
- "commit-id": "txn-770e8400-e29b-41d4-a716-446655440002",
- "results": [
- {
- "identifier": {"namespace": ["analytics"], "name": "events"},
- "metadata-location": "s3://bucket/events/metadata/00002.metadata.json"
- },
- {
- "identifier": {"namespace": ["analytics"], "name": "event_counts"},
- "metadata-location": "s3://bucket/event_counts/metadata/00003.metadata.json"
- }
- ]
-}
-```
-
-### Response (Partial Failure)
-
-If any requirement fails, the entire transaction is rolled back:
-
-```json
-{
- "error": {
- "message": "Transaction failed: requirement not met for table analytics.events",
- "type": "CommitFailedException",
- "code": 409,
- "failed-requirements": [
- {
- "identifier": {"namespace": ["analytics"], "name": "events"},
- "requirement": {"type": "assert-current-schema-id", "current-schema-id": 0},
- "actual": {"current-schema-id": 1}
- }
- ]
- }
-}
-```
-
-### Errors
-
-| Code | Error | Description |
-|------|-------|-------------|
-| 400 | `BadRequestException` | Invalid request format |
-| 404 | `NoSuchTableException` | One or more tables not found |
-| 409 | `CommitFailedException` | One or more requirements not met |
-| 500 | `ServerError` | Internal transaction error |
-
-### Example
-
-```bash
-curl -X POST http://localhost:8181/v1/transactions/commit \
- -H "Content-Type: application/json" \
- -d '{
- "table-changes": [
- {
- "identifier": {"namespace": ["analytics"], "name": "events"},
- "requirements": [
- {"type": "assert-table-uuid", "uuid": "550e8400-e29b-41d4-a716-446655440000"}
- ],
- "updates": [
- {"action": "set-properties", "updates": {"last-modified-by": "etl-pipeline"}}
- ]
- },
- {
- "identifier": {"namespace": ["analytics"], "name": "aggregates"},
- "requirements": [
- {"type": "assert-table-uuid", "uuid": "660e8400-e29b-41d4-a716-446655440001"}
- ],
- "updates": [
- {"action": "set-properties", "updates": {"last-modified-by": "etl-pipeline"}}
- ]
- }
- ]
- }'
-```
-
----
-
-## Transaction Semantics
-
-### Atomicity
-
-All table changes in a transaction either succeed together or fail together. If any requirement check fails, no changes are applied to any table.
-
-### Isolation
-
-Transactions use optimistic concurrency control:
-
-1. Requirements are checked for all tables
-2. If all pass, updates are applied
-3. If any concurrent modification occurred, the transaction fails
-
-### Ordering
-
-Within a transaction:
-- Requirements are checked in order
-- Updates are applied in order
-- All updates complete before the transaction commits
-
-### Limitations
-
-- Maximum 100 tables per transaction
-- Maximum 1000 updates per table
-- Transaction timeout: 30 seconds (configurable)
-
----
-
-## Use Cases
-
-### Coordinated Data Pipeline
-
-Write to a fact table and update a summary table atomically:
-
-```json
-{
- "table-changes": [
- {
- "identifier": {"namespace": ["warehouse"], "name": "sales_facts"},
- "requirements": [{"type": "assert-table-uuid", "uuid": "..."}],
- "updates": [
- {"action": "add-snapshot", "snapshot": {...}},
- {"action": "set-snapshot-ref", "ref-name": "main", "type": "branch", "snapshot-id": 123}
- ]
- },
- {
- "identifier": {"namespace": ["warehouse"], "name": "daily_sales_summary"},
- "requirements": [{"type": "assert-table-uuid", "uuid": "..."}],
- "updates": [
- {"action": "add-snapshot", "snapshot": {...}},
- {"action": "set-snapshot-ref", "ref-name": "main", "type": "branch", "snapshot-id": 456}
- ]
- }
- ]
-}
-```
-
-### Schema Migration
-
-Update schemas across related tables:
-
-```json
-{
- "table-changes": [
- {
- "identifier": {"namespace": ["app"], "name": "users"},
- "requirements": [{"type": "assert-current-schema-id", "current-schema-id": 0}],
- "updates": [
- {"action": "add-schema", "schema": {...}},
- {"action": "set-current-schema", "schema-id": 1}
- ]
- },
- {
- "identifier": {"namespace": ["app"], "name": "user_profiles"},
- "requirements": [{"type": "assert-current-schema-id", "current-schema-id": 0}],
- "updates": [
- {"action": "add-schema", "schema": {...}},
- {"action": "set-current-schema", "schema-id": 1}
- ]
- }
- ]
-}
-```
-
-### Bulk Property Update
-
-Update properties across multiple tables:
-
-```json
-{
- "table-changes": [
- {
- "identifier": {"namespace": ["team_a"], "name": "table1"},
- "requirements": [],
- "updates": [{"action": "set-properties", "updates": {"owner": "new-team"}}]
- },
- {
- "identifier": {"namespace": ["team_a"], "name": "table2"},
- "requirements": [],
- "updates": [{"action": "set-properties", "updates": {"owner": "new-team"}}]
- }
- ]
-}
-```
diff --git a/docs-site/content/docs/configuration/auth.md b/docs-site/content/docs/configuration/auth.md
deleted file mode 100644
index 8fa6d43..0000000
--- a/docs-site/content/docs/configuration/auth.md
+++ /dev/null
@@ -1,319 +0,0 @@
----
-title: "Authentication"
-weight: 4
----
-
-# Authentication Configuration
-
-Configure authentication and authorization for the catalog API.
-
-## Options
-
-```yaml
-auth:
- enabled: false
- token_expiry: 1h
- signing_key: "change-me-in-production"
-
- oauth2:
- enabled: false
- issuer: ""
- client_id: ""
- client_secret: ""
-
- api_key:
- enabled: false
-```
-
-## Reference
-
-### General Options
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `enabled` | boolean | `false` | Enable authentication |
-| `token_expiry` | duration | `1h` | Access token lifetime |
-| `signing_key` | string | - | Secret key for signing tokens |
-
-### OAuth2 Options
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `oauth2.enabled` | boolean | `false` | Enable OAuth2 endpoint |
-| `oauth2.issuer` | string | `""` | External OAuth issuer URL |
-| `oauth2.client_id` | string | `""` | OAuth client ID |
-| `oauth2.client_secret` | string | `""` | OAuth client secret |
-
-### API Key Options
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `api_key.enabled` | boolean | `false` | Enable API key authentication |
-
-## Environment Variables
-
-```bash
-ICEBERG_AUTH_ENABLED=true
-ICEBERG_AUTH_TOKEN_EXPIRY=1h
-ICEBERG_AUTH_SIGNING_KEY=your-256-bit-secret-key
-ICEBERG_AUTH_OAUTH2_ENABLED=true
-ICEBERG_AUTH_OAUTH2_ISSUER=https://your-idp.example.com
-ICEBERG_AUTH_OAUTH2_CLIENT_ID=your-client-id
-ICEBERG_AUTH_OAUTH2_CLIENT_SECRET=your-client-secret
-ICEBERG_AUTH_API_KEY_ENABLED=true
-```
-
----
-
-## Enabling Authentication
-
-### Basic Setup
-
-```yaml
-auth:
- enabled: true
- token_expiry: 1h
- signing_key: "your-secure-256-bit-secret-key-here-change-me"
-```
-
-> [!CAUTION]
-> Always change the `signing_key` in production. Use a cryptographically secure random value of at least 256 bits (32 random bytes).
-
-Generate a secure key:
-
-```bash
-openssl rand -hex 32
-```
-
----
-
-## OAuth2 Token Exchange
-
-Enable the OAuth2 token endpoint for Iceberg clients:
-
-```yaml
-auth:
- enabled: true
- signing_key: "your-secure-key"
-
- oauth2:
- enabled: true
-```
-
-Clients can then exchange credentials for tokens:
-
-```bash
-curl -X POST http://localhost:8181/v1/oauth/tokens \
- -H "Content-Type: application/x-www-form-urlencoded" \
- -d "grant_type=client_credentials" \
- -d "client_id=my-client" \
- -d "client_secret=my-secret"
-```
-
-Response:
-
-```json
-{
- "access_token": "eyJhbGciOiJIUzI1NiIs...",
- "token_type": "bearer",
- "expires_in": 3600
-}
-```
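-
-The same exchange as a Go sketch (the client ID, secret, and localhost URL are placeholders): post form-encoded credentials, then send the returned token as a bearer token on catalog calls.
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/url"
-)
-
-func main() {
-	// Exchange client credentials for an access token.
-	form := url.Values{
-		"grant_type":    {"client_credentials"},
-		"client_id":     {"my-client"},
-		"client_secret": {"my-secret"},
-	}
-	resp, err := http.PostForm("http://localhost:8181/v1/oauth/tokens", form)
-	if err != nil {
-		panic(err)
-	}
-	defer resp.Body.Close()
-
-	var tok struct {
-		AccessToken string `json:"access_token"`
-		ExpiresIn   int    `json:"expires_in"`
-	}
-	if err := json.NewDecoder(resp.Body).Decode(&tok); err != nil {
-		panic(err)
-	}
-
-	// Use the token on any subsequent catalog request.
-	req, _ := http.NewRequest("GET", "http://localhost:8181/v1/namespaces", nil)
-	req.Header.Set("Authorization", "Bearer "+tok.AccessToken)
-	nsResp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		panic(err)
-	}
-	defer nsResp.Body.Close()
-	fmt.Println("namespaces request:", nsResp.Status)
-}
-```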
-
----
-
-## External OAuth Provider
-
-Use an external OAuth/OIDC provider:
-
-```yaml
-auth:
- enabled: true
-
- oauth2:
- enabled: true
- issuer: "https://your-idp.example.com"
- # client_id/client_secret optional for token validation
-```
-
-Bingsan will validate tokens against the issuer's JWKS endpoint.
-
-### Supported Providers
-
-- **Auth0**: `issuer: "https://your-tenant.auth0.com/"`
-- **Okta**: `issuer: "https://your-org.okta.com"`
-- **Keycloak**: `issuer: "https://keycloak.example.com/realms/your-realm"`
-- **Azure AD**: `issuer: "https://login.microsoftonline.com/your-tenant/v2.0"`
-- **Google**: `issuer: "https://accounts.google.com"`
-
----
-
-## API Key Authentication
-
-Enable API key authentication:
-
-```yaml
-auth:
- enabled: true
-
- api_key:
- enabled: true
-```
-
-Use API keys in requests:
-
-```bash
-curl -H "Authorization: Bearer YOUR_API_KEY" \
- http://localhost:8181/v1/namespaces
-```
-
----
-
-## Client Configuration
-
-### Apache Spark
-
-```properties
-spark.sql.catalog.bingsan=org.apache.iceberg.spark.SparkCatalog
-spark.sql.catalog.bingsan.type=rest
-spark.sql.catalog.bingsan.uri=http://localhost:8181
-spark.sql.catalog.bingsan.credential=client_id:client_secret
-```
-
-### PyIceberg
-
-```python
-from pyiceberg.catalog import load_catalog
-
-catalog = load_catalog(
- "rest",
- uri="http://localhost:8181",
- credential="client_id:client_secret"
-)
-```
-
-### Trino
-
-```properties
-connector.name=iceberg
-iceberg.catalog.type=rest
-iceberg.rest-catalog.uri=http://localhost:8181
-iceberg.rest-catalog.security=OAUTH2
-iceberg.rest-catalog.oauth2.client-id=client_id
-iceberg.rest-catalog.oauth2.client-secret=client_secret
-```
-
-### cURL
-
-```bash
-# Get token
-TOKEN=$(curl -s -X POST http://localhost:8181/v1/oauth/tokens \
- -d "grant_type=client_credentials" \
- -d "client_id=my-client" \
- -d "client_secret=my-secret" | jq -r '.access_token')
-
-# Use token
-curl -H "Authorization: Bearer $TOKEN" \
- http://localhost:8181/v1/namespaces
-```
-
----
-
-## Token Expiry
-
-Configure token lifetime:
-
-```yaml
-auth:
- token_expiry: 1h # 1 hour (default)
- # or
- token_expiry: 30m # 30 minutes
- # or
- token_expiry: 24h # 24 hours
-```
-
-### Considerations
-
-- **Shorter expiry** (15m-1h): More secure, more token refreshes
-- **Longer expiry** (24h+): Fewer refreshes, larger window if token compromised
-
----
-
-## Endpoints Without Authentication
-
-These endpoints never require authentication:
-
-| Endpoint | Description |
-|----------|-------------|
-| `GET /health` | Health check |
-| `GET /ready` | Readiness check |
-| `GET /metrics` | Prometheus metrics |
-| `GET /v1/config` | Catalog configuration |
-| `POST /v1/oauth/tokens` | Token exchange |
-
----
-
-## Security Best Practices
-
-### Signing Key
-
-- Use at least 256 bits of entropy
-- Store in a secure secret store (Kubernetes Secrets, HashiCorp Vault)
-- Rotate periodically
-
-```bash
-# Generate secure key
-openssl rand -hex 32
-
-# Or using /dev/urandom
-head -c 32 /dev/urandom | xxd -p -c 64
-```
-
-### HTTPS
-
-Always use HTTPS in production:
-
-```yaml
-# Place Bingsan behind a TLS-terminating proxy/load balancer
-server:
- host: 0.0.0.0
- port: 8181
-```
-
-### Network Security
-
-- Use network policies to restrict access
-- Place behind a firewall
-- Use VPC endpoints for cloud services
-
----
-
-## Troubleshooting
-
-### Invalid Token
-
-```
-Error: invalid or expired token
-```
-
-- Check the token hasn't expired (see the sketch below)
-- Verify signing key matches
-- Ensure token is properly formatted
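-
-For the expiry check, a JWT's claims can be read without verifying it. A standard-library sketch (no signature verification — diagnostic use only) that prints the `exp` claim:
-
-```go
-package main
-
-import (
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"os"
-	"strings"
-	"time"
-)
-
-func main() {
-	token := os.Args[1] // pass the JWT as the first argument
-
-	parts := strings.Split(token, ".")
-	if len(parts) != 3 {
-		fmt.Println("not a JWT: expected header.payload.signature")
-		return
-	}
-
-	// Decode the payload segment (unverified — do not trust it for authorization).
-	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
-	if err != nil {
-		fmt.Println("cannot decode payload:", err)
-		return
-	}
-
-	var claims struct {
-		Exp int64 `json:"exp"`
-	}
-	if err := json.Unmarshal(payload, &claims); err != nil {
-		fmt.Println("cannot parse claims:", err)
-		return
-	}
-
-	expiry := time.Unix(claims.Exp, 0)
-	fmt.Printf("expires at %s (expired: %t)\n", expiry, time.Now().After(expiry))
-}
-```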
-
-### Authentication Required
-
-```
-Error: authentication required
-```
-
-- Include an `Authorization: Bearer <token>` header
-- Verify authentication is properly configured
-
-### Invalid Credentials
-
-```
-Error: invalid client credentials
-```
-
-- Check client_id and client_secret
-- Verify credentials are URL-encoded if using form data
diff --git a/docs-site/content/docs/configuration/catalog.md b/docs-site/content/docs/configuration/catalog.md
deleted file mode 100644
index edfe947..0000000
--- a/docs-site/content/docs/configuration/catalog.md
+++ /dev/null
@@ -1,226 +0,0 @@
----
-title: "Catalog"
-weight: 5
----
-
-# Catalog Configuration
-
-Configure catalog behavior and locking.
-
-## Options
-
-```yaml
-catalog:
- prefix: ""
- default_warehouse: ""
- lock_timeout: 30s
- lock_retry_interval: 100ms
- max_lock_retries: 100
-```
-
-## Reference
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `prefix` | string | `""` | Catalog name prefix for multi-catalog deployments |
-| `default_warehouse` | string | `""` | Default warehouse location (overrides `storage.warehouse`) |
-| `lock_timeout` | duration | `30s` | Maximum time to hold distributed locks |
-| `lock_retry_interval` | duration | `100ms` | Interval between lock acquisition retries |
-| `max_lock_retries` | integer | `100` | Maximum lock acquisition attempts |
-
-## Environment Variables
-
-```bash
-ICEBERG_CATALOG_PREFIX=my-catalog
-ICEBERG_CATALOG_DEFAULT_WAREHOUSE=s3://bucket/warehouse
-ICEBERG_CATALOG_LOCK_TIMEOUT=30s
-ICEBERG_CATALOG_LOCK_RETRY_INTERVAL=100ms
-ICEBERG_CATALOG_MAX_LOCK_RETRIES=100
-```
-
----
-
-## Catalog Prefix
-
-Use a prefix to support multiple logical catalogs on a single Bingsan instance:
-
-```yaml
-catalog:
- prefix: "production"
-```
-
-Clients can then access the catalog at:
-
-```
-GET /v1/production/namespaces
-POST /v1/production/namespaces/{namespace}/tables
-```
-
-The prefix is included in the API path but not stored in the database.
-
-### Multi-Catalog Deployment
-
-For separate production and staging catalogs:
-
-```yaml
-# production instance
-catalog:
- prefix: "prod"
-
-# staging instance
-catalog:
- prefix: "staging"
-```
-
-Clients then reach each catalog at its prefixed path:
-
-```
-http://catalog.example.com/v1/prod/namespaces
-http://catalog.example.com/v1/staging/namespaces
-```
-
----
-
-## Default Warehouse
-
-Override the storage warehouse for table creation:
-
-```yaml
-storage:
- warehouse: s3://bucket/default-path
-
-catalog:
- default_warehouse: s3://bucket/special-warehouse
-```
-
-Tables created without a location will use `default_warehouse` if set, otherwise `storage.warehouse`.
-
----
-
-## Distributed Locking
-
-Bingsan implements distributed locking with PostgreSQL row-level locks and configurable timeouts, ensuring consistency when multiple instances run against the same database.
-
-{{< hint info >}}
-For detailed implementation and advanced tuning, see [Distributed Locking]({{< relref "/docs/performance/locking" >}}).
-{{< /hint >}}
-
-### How It Works
-
-1. When a table commit is requested, Bingsan acquires a lock on the table
-2. The lock prevents concurrent commits to the same table
-3. If another commit is in progress, the request retries
-4. After `max_lock_retries` attempts, the request fails (a simplified version of this loop is sketched below)
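-
-A simplified sketch of that loop (illustrative only, not Bingsan's actual implementation; it assumes a `tables` table keyed by `id` and any PostgreSQL driver registered as `postgres`):
-
-```go
-// Sketch only — not Bingsan's actual code.
-package main
-
-import (
-	"database/sql"
-	"errors"
-	"fmt"
-	"time"
-
-	_ "github.com/lib/pq" // any registered PostgreSQL driver works
-)
-
-func lockTable(db *sql.DB, tableID int64, retryInterval time.Duration, maxRetries int) (*sql.Tx, error) {
-	for attempt := 0; attempt < maxRetries; attempt++ {
-		tx, err := db.Begin()
-		if err != nil {
-			return nil, err
-		}
-		// NOWAIT fails immediately if another commit already holds the row lock.
-		if _, err := tx.Exec(`SELECT id FROM tables WHERE id = $1 FOR UPDATE NOWAIT`, tableID); err == nil {
-			return tx, nil // lock is held until Commit() or Rollback()
-		}
-		tx.Rollback()
-		time.Sleep(retryInterval) // lock_retry_interval
-	}
-	return nil, errors.New("could not acquire table lock")
-}
-
-func main() {
-	db, err := sql.Open("postgres",
-		"postgres://iceberg:iceberg@localhost:5432/iceberg_catalog?sslmode=disable")
-	if err != nil {
-		panic(err)
-	}
-	tx, err := lockTable(db, 42, 100*time.Millisecond, 100)
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-	defer tx.Rollback()
-	// ... check requirements and apply updates, then tx.Commit()
-}
-```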
-
-### Lock Configuration
-
-```yaml
-catalog:
- lock_timeout: 30s # How long to hold a lock
- lock_retry_interval: 100ms # Time between retries
- max_lock_retries: 100 # Maximum retry attempts
-```
-
-### Total Wait Time
-
-Maximum wait time = `lock_retry_interval` × `max_lock_retries`
-
-Default: 100ms × 100 = 10 seconds
-
-### High-Contention Settings
-
-For high-contention workloads:
-
-```yaml
-catalog:
- lock_timeout: 60s
- lock_retry_interval: 50ms
- max_lock_retries: 300
-```
-
-### Low-Latency Settings
-
-For latency-sensitive workloads:
-
-```yaml
-catalog:
- lock_timeout: 15s
- lock_retry_interval: 20ms
- max_lock_retries: 50
-```
-
----
-
-## Optimistic Concurrency
-
-In addition to distributed locks, Bingsan uses optimistic concurrency control:
-
-1. Client provides requirements (e.g., `assert-current-schema-id`)
-2. Server checks requirements against current state
-3. If requirements pass, update is applied
-4. If requirements fail, `409 Conflict` is returned
-
-### Example
-
-```json
-{
- "requirements": [
- {"type": "assert-current-schema-id", "current-schema-id": 0}
- ],
- "updates": [
- {"action": "set-current-schema", "schema-id": 1}
- ]
-}
-```
-
-If another client modified the schema between read and write, the commit fails.
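-
-A typical client-side retry loop for this (a sketch — the table path and the property update are placeholders, and error handling is trimmed): reload the table, rebuild the requirement from the current schema ID, and try again on `409`.
-
-```go
-package main
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net/http"
-)
-
-// Placeholder table; substitute your namespace and table name.
-const tableURL = "http://localhost:8181/v1/namespaces/app/tables/users"
-
-// currentSchemaID reloads the table and returns metadata.current-schema-id.
-func currentSchemaID() (int, error) {
-	resp, err := http.Get(tableURL)
-	if err != nil {
-		return 0, err
-	}
-	defer resp.Body.Close()
-	var out struct {
-		Metadata struct {
-			CurrentSchemaID int `json:"current-schema-id"`
-		} `json:"metadata"`
-	}
-	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
-		return 0, err
-	}
-	return out.Metadata.CurrentSchemaID, nil
-}
-
-func main() {
-	for attempt := 0; attempt < 3; attempt++ {
-		schemaID, err := currentSchemaID()
-		if err != nil {
-			panic(err)
-		}
-		body, _ := json.Marshal(map[string]any{
-			"requirements": []map[string]any{
-				{"type": "assert-current-schema-id", "current-schema-id": schemaID},
-			},
-			"updates": []map[string]any{
-				{"action": "set-properties", "updates": map[string]string{"owner": "data-team"}},
-			},
-		})
-		resp, err := http.Post(tableURL, "application/json", bytes.NewReader(body))
-		if err != nil {
-			panic(err)
-		}
-		resp.Body.Close()
-		if resp.StatusCode != http.StatusConflict {
-			fmt.Println("commit finished:", resp.Status)
-			return // success, or an error that retrying will not fix
-		}
-		// 409: another writer got there first — reload and try again.
-	}
-	fmt.Println("gave up after repeated conflicts")
-}
-```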
-
----
-
-## Transaction Isolation
-
-Bingsan uses PostgreSQL's SERIALIZABLE isolation for multi-table transactions, ensuring:
-
-- **Atomicity**: All table changes succeed or all fail
-- **Consistency**: Requirements are checked atomically
-- **Isolation**: Concurrent transactions don't interfere
-- **Durability**: Committed changes persist
-
----
-
-## Best Practices
-
-### Lock Timeout
-
-- Set higher than your slowest expected commit
-- Include time for network latency
-- Default (30s) is suitable for most workloads
-
-### Retry Configuration
-
-- `lock_retry_interval`: Lower values reduce latency but increase database load
-- `max_lock_retries`: Higher values handle more contention, but increase wait time
-
-### Monitoring
-
-Watch for lock contention:
-
-```promql
-# Lock wait time
-rate(iceberg_db_wait_duration_seconds_total[5m])
-
-# Lock failures
-rate(iceberg_commits_total{status="lock_failed"}[5m])
-```
-
-### Troubleshooting
-
-**Commits timing out**:
-- Increase `max_lock_retries` or `lock_timeout`
-- Reduce concurrent commit rate
-- Check for long-running transactions
-
-**High latency on commits**:
-- Reduce `lock_retry_interval` for faster retries
-- Check database connection pool usage
-- Verify network latency to PostgreSQL
diff --git a/docs-site/content/docs/configuration/database.md b/docs-site/content/docs/configuration/database.md
deleted file mode 100644
index db481f4..0000000
--- a/docs-site/content/docs/configuration/database.md
+++ /dev/null
@@ -1,275 +0,0 @@
----
-title: "Database"
-weight: 2
----
-
-# Database Configuration
-
-Configure the PostgreSQL connection for metadata storage.
-
-## Options
-
-```yaml
-database:
- host: localhost
- port: 5432
- user: iceberg
- password: iceberg
- database: iceberg_catalog
- ssl_mode: disable
- max_open_conns: 25
- max_idle_conns: 5
- conn_max_lifetime: 5m
- conn_max_idle_time: 5m
-```
-
-## Reference
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `host` | string | `localhost` | PostgreSQL server hostname |
-| `port` | integer | `5432` | PostgreSQL server port |
-| `user` | string | `iceberg` | Database user |
-| `password` | string | `iceberg` | Database password |
-| `database` | string | `iceberg_catalog` | Database name |
-| `ssl_mode` | string | `disable` | SSL mode (`disable`, `require`, `verify-ca`, `verify-full`) |
-| `max_open_conns` | integer | `25` | Maximum open connections |
-| `max_idle_conns` | integer | `5` | Maximum idle connections |
-| `conn_max_lifetime` | duration | `5m` | Maximum connection lifetime |
-| `conn_max_idle_time` | duration | `5m` | Maximum connection idle time |
-
-## Environment Variables
-
-```bash
-ICEBERG_DATABASE_HOST=localhost
-ICEBERG_DATABASE_PORT=5432
-ICEBERG_DATABASE_USER=iceberg
-ICEBERG_DATABASE_PASSWORD=iceberg
-ICEBERG_DATABASE_DATABASE=iceberg_catalog
-ICEBERG_DATABASE_SSL_MODE=disable
-ICEBERG_DATABASE_MAX_OPEN_CONNS=25
-ICEBERG_DATABASE_MAX_IDLE_CONNS=5
-ICEBERG_DATABASE_CONN_MAX_LIFETIME=5m
-ICEBERG_DATABASE_CONN_MAX_IDLE_TIME=5m
-```
-
-## Connection URL
-
-Bingsan constructs a connection string from these settings:
-
-```
-postgresql://user:password@host:port/database?sslmode=ssl_mode
-```
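-
-If you build such a URL by hand (for example to test with `psql`), remember that the password must be percent-encoded; a small standard-library sketch:
-
-```go
-package main
-
-import (
-	"fmt"
-	"net/url"
-)
-
-func main() {
-	dsn := url.URL{
-		Scheme:   "postgresql",
-		User:     url.UserPassword("iceberg", "p@ss/word"),
-		Host:     "localhost:5432",
-		Path:     "/iceberg_catalog",
-		RawQuery: "sslmode=disable",
-	}
-	// Special characters in the password are percent-encoded automatically.
-	fmt.Println(dsn.String())
-}
-```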
-
-## SSL Modes
-
-| Mode | Description |
-|------|-------------|
-| `disable` | No SSL (development only) |
-| `require` | Use SSL but don't verify certificate |
-| `verify-ca` | Verify server certificate against CA |
-| `verify-full` | Verify certificate and hostname |
-
-### Production SSL Configuration
-
-```yaml
-database:
- host: postgres.example.com
- ssl_mode: verify-full
-```
-
-For AWS RDS with SSL:
-
-```yaml
-database:
- host: mydb.us-east-1.rds.amazonaws.com
- ssl_mode: require
-```
-
-## Connection Pool Tuning
-
-### Default Settings
-
-Suitable for most workloads:
-
-```yaml
-database:
- max_open_conns: 25
- max_idle_conns: 5
- conn_max_lifetime: 5m
- conn_max_idle_time: 5m
-```
-
-### High-Throughput Settings
-
-For high request rates:
-
-```yaml
-database:
- max_open_conns: 100
- max_idle_conns: 25
- conn_max_lifetime: 30m
- conn_max_idle_time: 10m
-```
-
-### Resource-Constrained Settings
-
-For limited resources:
-
-```yaml
-database:
- max_open_conns: 10
- max_idle_conns: 2
- conn_max_lifetime: 5m
- conn_max_idle_time: 1m
-```
-
-## Connection Pool Sizing
-
-### General Formula
-
-```
-max_open_conns = (2 * num_cpus) + effective_spindle_count
-```
-
-For SSDs, `effective_spindle_count` is typically 1.
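-
-As a starting point, the same heuristic in Go (a rule of thumb, not a hard limit):
-
-```go
-package main
-
-import (
-	"fmt"
-	"runtime"
-)
-
-func main() {
-	effectiveSpindles := 1 // SSD-backed PostgreSQL
-	maxOpenConns := 2*runtime.NumCPU() + effectiveSpindles
-	fmt.Println("suggested max_open_conns:", maxOpenConns)
-}
-```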
-
-### Considerations
-
-- **Too Few Connections**: Requests queue, increasing latency
-- **Too Many Connections**: Overloads PostgreSQL, causes memory issues
-
-### PostgreSQL max_connections
-
-Ensure PostgreSQL's `max_connections` is higher than `max_open_conns` multiplied by the number of Bingsan instances:
-
-```sql
-SHOW max_connections; -- Default is 100
-```
-
-Increase if needed:
-
-```sql
-ALTER SYSTEM SET max_connections = 200;
--- Requires restart
-```
-
-## Connection Lifetime
-
-### conn_max_lifetime
-
-Maximum time a connection can be reused. Set lower than PostgreSQL's `idle_in_transaction_session_timeout`:
-
-```yaml
-database:
- conn_max_lifetime: 5m
-```
-
-### conn_max_idle_time
-
-Maximum time a connection can be idle before being closed:
-
-```yaml
-database:
- conn_max_idle_time: 5m
-```
-
-## Database Setup
-
-### Create Database
-
-```sql
-CREATE DATABASE iceberg_catalog;
-CREATE USER iceberg WITH PASSWORD 'your-secure-password';
-GRANT ALL PRIVILEGES ON DATABASE iceberg_catalog TO iceberg;
-```
-
-### Required Extensions
-
-Bingsan uses these PostgreSQL extensions (auto-created if permissions allow):
-
-```sql
-CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-```
-
-### Migrations
-
-Bingsan automatically runs database migrations on startup. No manual schema setup is required.
-
-## Multiple Instances
-
-When running multiple Bingsan instances against the same database:
-
-```yaml
-database:
- max_open_conns: 25 # Per instance
-```
-
-Total connections = `max_open_conns * num_instances`
-
-Ensure PostgreSQL can handle the total:
-
-```sql
-ALTER SYSTEM SET max_connections = 500; -- For 20 instances
-```
-
-## Monitoring
-
-Monitor connection pool health via Prometheus metrics:
-
-```promql
-iceberg_db_connections_open
-iceberg_db_connections_in_use
-iceberg_db_connections_idle
-iceberg_db_wait_count_total
-```
-
-### Connection Pool Saturation Alert
-
-```yaml
-- alert: DatabasePoolSaturation
- expr: iceberg_db_connections_in_use / iceberg_db_connections_open > 0.9
- for: 5m
- labels:
- severity: warning
-```
-
-## Troubleshooting
-
-### Connection Refused
-
-```
-Error: connection refused
-```
-
-- Verify PostgreSQL is running
-- Check host and port settings
-- Ensure network connectivity
-
-### Authentication Failed
-
-```
-Error: password authentication failed
-```
-
-- Verify user and password
-- Check PostgreSQL's `pg_hba.conf` for allowed connections
-
-### Too Many Connections
-
-```
-Error: too many connections for role "iceberg"
-```
-
-- Reduce `max_open_conns`
-- Increase PostgreSQL's `max_connections`
-- Check for connection leaks in other applications
-
-### SSL Certificate Error
-
-```
-Error: certificate verify failed
-```
-
-- For development: use `ssl_mode: disable`
-- For production: ensure correct SSL certificates are installed
diff --git a/docs-site/content/docs/configuration/monitoring.md b/docs-site/content/docs/configuration/monitoring.md
deleted file mode 100644
index 1b8fb8d..0000000
--- a/docs-site/content/docs/configuration/monitoring.md
+++ /dev/null
@@ -1,330 +0,0 @@
----
-title: "Monitoring"
-weight: 6
----
-
-# Monitoring Configuration
-
-Configure observability features including logging, metrics, and event streaming.
-
-## Logging
-
-Bingsan uses Go's structured logging (`slog`) with JSON output by default.
-
-### Log Levels
-
-Control log verbosity via the `debug` flag:
-
-```yaml
-server:
- debug: false # INFO level
- # or
- debug: true # DEBUG level
-```
-
-### Log Output
-
-Logs are written to stdout in JSON format:
-
-```json
-{
- "time": "2024-01-15T10:30:00.000Z",
- "level": "INFO",
- "msg": "server listening",
- "address": "0.0.0.0:8181"
-}
-```
-
-### Docker/Kubernetes
-
-Logs are automatically collected from stdout:
-
-```bash
-# Docker
-docker logs bingsan
-
-# Kubernetes
-kubectl logs deployment/bingsan
-```
-
----
-
-## Prometheus Metrics
-
-Metrics are exposed at `/metrics` in Prometheus format.
-
-### Endpoint
-
-```bash
-curl http://localhost:8181/metrics
-```
-
-### Prometheus Configuration
-
-```yaml
-scrape_configs:
- - job_name: 'bingsan'
- static_configs:
- - targets: ['bingsan:8181']
- metrics_path: /metrics
- scrape_interval: 15s
-```
-
-### Kubernetes ServiceMonitor
-
-```yaml
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
- name: bingsan
-spec:
- selector:
- matchLabels:
- app: bingsan
- endpoints:
- - port: http
- path: /metrics
- interval: 15s
-```
-
-### Key Metrics
-
-| Metric | Description |
-|--------|-------------|
-| `iceberg_catalog_http_requests_total` | Total HTTP requests |
-| `iceberg_catalog_http_request_duration_seconds` | Request latency histogram |
-| `iceberg_namespaces_total` | Total namespaces |
-| `iceberg_tables_total` | Tables per namespace |
-| `iceberg_db_connections_open` | Open database connections |
-| `iceberg_db_connections_in_use` | In-use database connections |
-
-See [Health & Metrics API]({{< relref "/docs/api/health-metrics" >}}) for the complete list.
-
----
-
-## Health Checks
-
-### Liveness Probe
-
-Check if the server is running:
-
-```yaml
-livenessProbe:
- httpGet:
- path: /health
- port: 8181
- initialDelaySeconds: 10
- periodSeconds: 10
- failureThreshold: 3
-```
-
-### Readiness Probe
-
-Check if the server can accept requests (includes database connectivity):
-
-```yaml
-readinessProbe:
- httpGet:
- path: /ready
- port: 8181
- initialDelaySeconds: 5
- periodSeconds: 5
- failureThreshold: 3
-```
-
----
-
-## Event Streaming
-
-Real-time catalog events are available via WebSocket.
-
-### Endpoint
-
-```
-ws://localhost:8181/v1/events/stream
-```
-
-### Connection
-
-```bash
-# With wscat
-wscat -c "ws://localhost:8181/v1/events/stream"
-
-# With authentication
-wscat -c "ws://localhost:8181/v1/events/stream?token=YOUR_TOKEN"
-```
-
-### Event Types
-
-- `namespace_created`, `namespace_updated`, `namespace_deleted`
-- `table_created`, `table_updated`, `table_dropped`, `table_renamed`
-- `view_created`, `view_updated`, `view_dropped`, `view_renamed`
-- `transaction_committed`
-
-See [Events API]({{< relref "/docs/api/events" >}}) for details.
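-
-A minimal Go subscriber sketch (uses the third-party `github.com/gorilla/websocket` package; append `?token=...` to the URL when authentication is enabled):
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/gorilla/websocket" // third-party WebSocket client
-)
-
-func main() {
-	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8181/v1/events/stream", nil)
-	if err != nil {
-		log.Fatal("dial:", err)
-	}
-	defer conn.Close()
-
-	for {
-		_, msg, err := conn.ReadMessage()
-		if err != nil {
-			log.Fatal("read:", err)
-		}
-		fmt.Println(string(msg)) // each message is one catalog event
-	}
-}
-```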
-
----
-
-## Grafana Dashboards
-
-### Request Rate
-
-```promql
-sum(rate(iceberg_catalog_http_requests_total[5m])) by (status)
-```
-
-### Request Latency (p99)
-
-```promql
-histogram_quantile(0.99,
- sum(rate(iceberg_catalog_http_request_duration_seconds_bucket[5m])) by (le)
-)
-```
-
-### Error Rate
-
-```promql
-sum(rate(iceberg_catalog_http_requests_total{status=~"5.."}[5m]))
-/
-sum(rate(iceberg_catalog_http_requests_total[5m]))
-```
-
-### Database Pool Usage
-
-```promql
-iceberg_db_connections_in_use / iceberg_db_connections_max
-```
-
-### Catalog Size
-
-```promql
-iceberg_namespaces_total
-sum(iceberg_tables_total)
-```
-
----
-
-## Alerting
-
-### Prometheus Alerting Rules
-
-```yaml
-groups:
- - name: bingsan-alerts
- rules:
- - alert: BingsanDown
- expr: up{job="bingsan"} == 0
- for: 1m
- labels:
- severity: critical
- annotations:
- summary: "Bingsan is down"
-
- - alert: BingsanHighErrorRate
- expr: |
- sum(rate(iceberg_catalog_http_requests_total{status=~"5.."}[5m]))
- / sum(rate(iceberg_catalog_http_requests_total[5m])) > 0.05
- for: 5m
- labels:
- severity: warning
- annotations:
- summary: "High error rate in Bingsan"
-
- - alert: BingsanHighLatency
- expr: |
- histogram_quantile(0.99,
- sum(rate(iceberg_catalog_http_request_duration_seconds_bucket[5m])) by (le)
- ) > 1
- for: 5m
- labels:
- severity: warning
- annotations:
- summary: "High latency in Bingsan"
-
- - alert: BingsanDatabasePoolExhausted
- expr: iceberg_db_connections_in_use / iceberg_db_connections_max > 0.9
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: "Database connection pool nearly exhausted"
-```
-
----
-
-## Distributed Tracing
-
-> [!NOTE]
-> OpenTelemetry tracing support is planned for a future release.
-
----
-
-## Log Aggregation
-
-### Fluentd Configuration
-
-```xml
-<source>
-  @type forward
-  port 24224
-</source>
-
-<filter **>
-  @type parser
-  key_name log
-  <parse>
-    @type json
-  </parse>
-</filter>
-
-<match **>
-  @type elasticsearch
-  host elasticsearch
-  port 9200
-  logstash_format true
-  logstash_prefix bingsan
-</match>
-```
-
-### Docker Logging Driver
-
-```yaml
-services:
- bingsan:
- logging:
- driver: json-file
- options:
- max-size: "100m"
- max-file: "5"
-```
-
-### Kubernetes
-
-Logs are automatically collected by most Kubernetes logging solutions (Loki, Elasticsearch, CloudWatch).
-
----
-
-## Request Tracing
-
-Each request includes a unique ID in the response headers and logs:
-
-```bash
-curl -v http://localhost:8181/v1/namespaces
-# Response headers include: X-Request-ID: abc123
-```
-
-Use this ID to correlate requests across logs:
-
-```json
-{"level":"INFO","msg":"request completed","request_id":"abc123","status":200}
-```
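-
-From a client, the ID can be read straight off the response (a short sketch against the local endpoint):
-
-```go
-package main
-
-import (
-	"fmt"
-	"net/http"
-)
-
-func main() {
-	resp, err := http.Get("http://localhost:8181/v1/namespaces")
-	if err != nil {
-		panic(err)
-	}
-	defer resp.Body.Close()
-
-	// The same value appears as request_id in the server logs.
-	fmt.Println("X-Request-ID:", resp.Header.Get("X-Request-ID"))
-}
-```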
-
----
-
-## Best Practices
-
-1. **Set up basic monitoring first**: Health checks, error rate, latency
-2. **Alert on symptoms, not causes**: High error rate, not specific errors
-3. **Use dashboards for investigation**: Drill down from alerts to detailed metrics
-4. **Aggregate logs centrally**: Easier debugging and audit trails
-5. **Monitor database connections**: Often the bottleneck in catalog operations
diff --git a/docs-site/content/docs/configuration/storage.md b/docs-site/content/docs/configuration/storage.md
deleted file mode 100644
index 2da6f05..0000000
--- a/docs-site/content/docs/configuration/storage.md
+++ /dev/null
@@ -1,368 +0,0 @@
----
-title: "Storage"
-weight: 3
----
-
-# Storage Configuration
-
-Configure the storage backend for Iceberg data files.
-
-## Overview
-
-Bingsan supports three storage backends:
-
-- **S3** - Amazon S3 and compatible services (MinIO, R2)
-- **GCS** - Google Cloud Storage
-- **Local** - Local filesystem (development only)
-
-## Options
-
-```yaml
-storage:
- type: s3
- warehouse: s3://bucket/warehouse
-
- s3:
- endpoint: ""
- region: us-east-1
- access_key_id: ""
- secret_access_key: ""
- bucket: warehouse
- use_path_style: false
-
- gcs:
- project: ""
- credentials_file: ""
- bucket: ""
-
- local:
- root_path: /tmp/iceberg/data
-```
-
-## Reference
-
-### General Options
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `type` | string | `local` | Storage type: `s3`, `gcs`, or `local` |
-| `warehouse` | string | - | Default warehouse location for new tables |
-
-### S3 Options
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `endpoint` | string | `""` | Custom endpoint (for MinIO, R2, etc.) |
-| `region` | string | `us-east-1` | AWS region |
-| `access_key_id` | string | `""` | AWS access key (or use IAM role) |
-| `secret_access_key` | string | `""` | AWS secret key |
-| `bucket` | string | `warehouse` | S3 bucket name |
-| `use_path_style` | boolean | `false` | Use path-style URLs (for MinIO) |
-
-### GCS Options
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `project` | string | `""` | GCP project ID |
-| `credentials_file` | string | `""` | Path to service account JSON |
-| `bucket` | string | `""` | GCS bucket name |
-
-### Local Options
-
-| Option | Type | Default | Description |
-|--------|------|---------|-------------|
-| `root_path` | string | `/tmp/iceberg/data` | Root directory for data files |
-
-## Environment Variables
-
-```bash
-ICEBERG_STORAGE_TYPE=s3
-ICEBERG_STORAGE_WAREHOUSE=s3://bucket/warehouse
-
-# S3
-ICEBERG_STORAGE_S3_ENDPOINT=
-ICEBERG_STORAGE_S3_REGION=us-east-1
-ICEBERG_STORAGE_S3_ACCESS_KEY_ID=AKIA...
-ICEBERG_STORAGE_S3_SECRET_ACCESS_KEY=...
-ICEBERG_STORAGE_S3_BUCKET=warehouse
-ICEBERG_STORAGE_S3_USE_PATH_STYLE=false
-
-# GCS
-ICEBERG_STORAGE_GCS_PROJECT=my-project
-ICEBERG_STORAGE_GCS_CREDENTIALS_FILE=/path/to/credentials.json
-ICEBERG_STORAGE_GCS_BUCKET=my-bucket
-
-# Local
-ICEBERG_STORAGE_LOCAL_ROOT_PATH=/tmp/iceberg/data
-```
-
----
-
-## Amazon S3
-
-### Basic Configuration
-
-```yaml
-storage:
- type: s3
- warehouse: s3://my-bucket/warehouse
-
- s3:
- region: us-east-1
- bucket: my-bucket
-```
-
-### With IAM Role (Recommended)
-
-When running on EC2, ECS, EKS, or Lambda with an IAM role attached:
-
-```yaml
-storage:
- type: s3
- warehouse: s3://my-bucket/warehouse
-
- s3:
- region: us-east-1
- bucket: my-bucket
- # No credentials needed - uses instance role
-```
-
-### With Access Keys
-
-```yaml
-storage:
- type: s3
- warehouse: s3://my-bucket/warehouse
-
- s3:
- region: us-east-1
- bucket: my-bucket
- access_key_id: "AKIAIOSFODNN7EXAMPLE"
- secret_access_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
-```
-
-### Required IAM Permissions
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Action": [
- "s3:GetObject",
- "s3:PutObject",
- "s3:DeleteObject",
- "s3:ListBucket"
- ],
- "Resource": [
- "arn:aws:s3:::my-bucket",
- "arn:aws:s3:::my-bucket/*"
- ]
- }
- ]
-}
-```
-
----
-
-## MinIO
-
-MinIO is S3-compatible and works with the S3 backend:
-
-```yaml
-storage:
- type: s3
- warehouse: s3://warehouse/data
-
- s3:
- endpoint: "http://minio:9000"
- region: us-east-1
- access_key_id: "minioadmin"
- secret_access_key: "minioadmin"
- bucket: warehouse
- use_path_style: true
-```
-
-Key differences from S3:
-- Set `endpoint` to MinIO address
-- Enable `use_path_style: true`
-
----
-
-## Cloudflare R2
-
-R2 is S3-compatible:
-
-```yaml
-storage:
- type: s3
- warehouse: s3://my-r2-bucket/warehouse
-
- s3:
- endpoint: "https://ACCOUNT_ID.r2.cloudflarestorage.com"
- region: auto
- access_key_id: "YOUR_R2_ACCESS_KEY"
- secret_access_key: "YOUR_R2_SECRET_KEY"
- bucket: my-r2-bucket
- use_path_style: false
-```
-
----
-
-## Google Cloud Storage
-
-### Basic Configuration
-
-```yaml
-storage:
- type: gcs
- warehouse: gs://my-bucket/warehouse
-
- gcs:
- project: my-gcp-project
- bucket: my-bucket
-```
-
-### With Service Account
-
-```yaml
-storage:
- type: gcs
- warehouse: gs://my-bucket/warehouse
-
- gcs:
- project: my-gcp-project
- bucket: my-bucket
- credentials_file: /path/to/service-account.json
-```
-
-### With Default Credentials
-
-On GCE/GKE with attached service account:
-
-```yaml
-storage:
- type: gcs
- warehouse: gs://my-bucket/warehouse
-
- gcs:
- project: my-gcp-project
- bucket: my-bucket
- # No credentials_file - uses default credentials
-```
-
-### Required Permissions
-
-The service account needs these roles:
-- `roles/storage.objectAdmin` (or custom role with equivalent permissions)
-
-Specific permissions needed:
-- `storage.objects.get`
-- `storage.objects.create`
-- `storage.objects.delete`
-- `storage.objects.list`
-- `storage.buckets.get`
-
----
-
-## Local Storage
-
-> [!WARNING]
-> Local storage is intended for development and testing only. Do not use in production.
-
-### Configuration
-
-```yaml
-storage:
- type: local
- warehouse: file:///data/warehouse
-
- local:
- root_path: /data/warehouse
-```
-
-### Docker Volume
-
-When using Docker, mount a volume:
-
-```yaml
-# docker-compose.yml
-services:
- bingsan:
- volumes:
- - ./data:/data/warehouse
-```
-
----
-
-## Warehouse Location
-
-The `warehouse` setting defines the default location for new tables.
-
-### Format
-
-- S3: `s3://bucket/path`
-- GCS: `gs://bucket/path`
-- Local: `file:///absolute/path`
-
-### Per-Table Locations
-
-Tables can override the warehouse location:
-
-```bash
-curl -X POST http://localhost:8181/v1/namespaces/analytics/tables \
- -H "Content-Type: application/json" \
- -d '{
- "name": "events",
- "location": "s3://different-bucket/custom-path/events",
- "schema": {...}
- }'
-```
-
----
-
-## Vended Credentials
-
-When authentication is enabled, Bingsan can vend temporary credentials to clients:
-
-```bash
-curl http://localhost:8181/v1/namespaces/analytics/tables/events/credentials \
- -H "Authorization: Bearer YOUR_TOKEN" \
- -H "X-Iceberg-Access-Delegation: vended-credentials"
-```
-
-This requires appropriate IAM policies allowing AssumeRole or token generation.
-
----
-
-## Best Practices
-
-### S3
-
-- Use IAM roles instead of access keys
-- Enable server-side encryption
-- Use VPC endpoints for improved security and performance
-- Enable versioning for data protection
-
-### GCS
-
-- Use Workload Identity on GKE
-- Configure lifecycle policies for old metadata files
-- Use regional buckets for lower latency
-
-### Multi-Region
-
-For multi-region deployments:
-
-```yaml
-storage:
- type: s3
- warehouse: s3://my-bucket/warehouse
-
- s3:
- region: us-west-2
- bucket: my-bucket-us-west
-```
-
-Configure separate Bingsan instances per region pointing to regional buckets.
diff --git a/docs-site/content/docs/deployment/docker.md b/docs-site/content/docs/deployment/docker.md
deleted file mode 100644
index 6a8bf9d..0000000
--- a/docs-site/content/docs/deployment/docker.md
+++ /dev/null
@@ -1,400 +0,0 @@
----
-title: "Docker"
-weight: 1
----
-
-# Docker Deployment
-
-Deploy Bingsan using Docker or Docker Compose.
-
-## Docker Compose (Development)
-
-The fastest way to get started for development and testing.
-
-### Prerequisites
-
-- Docker Engine 20.10+
-- Docker Compose v2.0+
-
-### Quick Start
-
-```bash
-# Clone the repository
-git clone https://github.com/kimuyb/bingsan.git
-cd bingsan
-
-# Copy configuration
-cp config.example.yaml config.yaml
-
-# Start services
-docker compose -f deployments/docker/docker-compose.yml up -d
-```
-
-### docker-compose.yml
-
-```yaml
-version: '3.8'
-
-services:
- bingsan:
- image: ghcr.io/kimuyb/bingsan:latest
- # Or build from source:
- # build:
- # context: .
- # dockerfile: Dockerfile
- ports:
- - "8181:8181"
- environment:
- - ICEBERG_SERVER_HOST=0.0.0.0
- - ICEBERG_SERVER_PORT=8181
- - ICEBERG_DATABASE_HOST=postgres
- - ICEBERG_DATABASE_PORT=5432
- - ICEBERG_DATABASE_USER=iceberg
- - ICEBERG_DATABASE_PASSWORD=iceberg
- - ICEBERG_DATABASE_DATABASE=iceberg_catalog
- - ICEBERG_STORAGE_TYPE=local
- - ICEBERG_STORAGE_WAREHOUSE=file:///data/warehouse
- volumes:
- - warehouse-data:/data/warehouse
- depends_on:
- postgres:
- condition: service_healthy
- healthcheck:
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:8181/health"]
- interval: 10s
- timeout: 5s
- retries: 5
-
- postgres:
- image: postgres:15-alpine
- environment:
- - POSTGRES_USER=iceberg
- - POSTGRES_PASSWORD=iceberg
- - POSTGRES_DB=iceberg_catalog
- ports:
- - "5432:5432"
- volumes:
- - postgres-data:/var/lib/postgresql/data
- healthcheck:
- test: ["CMD-SHELL", "pg_isready -U iceberg -d iceberg_catalog"]
- interval: 5s
- timeout: 5s
- retries: 5
-
-volumes:
- warehouse-data:
- postgres-data:
-```
-
-### Managing the Stack
-
-```bash
-# View logs
-docker compose -f deployments/docker/docker-compose.yml logs -f
-
-# Stop services
-docker compose -f deployments/docker/docker-compose.yml down
-
-# Stop and remove volumes
-docker compose -f deployments/docker/docker-compose.yml down -v
-
-# Rebuild after code changes
-docker compose -f deployments/docker/docker-compose.yml build --no-cache
-docker compose -f deployments/docker/docker-compose.yml up -d
-```
-
----
-
-## Docker Compose with S3 (MinIO)
-
-For testing with S3-compatible storage:
-
-```yaml
-version: '3.8'
-
-services:
- bingsan:
- image: ghcr.io/kimuyb/bingsan:latest
- ports:
- - "8181:8181"
- environment:
- - ICEBERG_DATABASE_HOST=postgres
- - ICEBERG_DATABASE_PORT=5432
- - ICEBERG_DATABASE_USER=iceberg
- - ICEBERG_DATABASE_PASSWORD=iceberg
- - ICEBERG_DATABASE_DATABASE=iceberg_catalog
- - ICEBERG_STORAGE_TYPE=s3
- - ICEBERG_STORAGE_WAREHOUSE=s3://warehouse/data
- - ICEBERG_STORAGE_S3_ENDPOINT=http://minio:9000
- - ICEBERG_STORAGE_S3_ACCESS_KEY_ID=minioadmin
- - ICEBERG_STORAGE_S3_SECRET_ACCESS_KEY=minioadmin
- - ICEBERG_STORAGE_S3_BUCKET=warehouse
- - ICEBERG_STORAGE_S3_USE_PATH_STYLE=true
- - ICEBERG_STORAGE_S3_REGION=us-east-1
- depends_on:
- postgres:
- condition: service_healthy
- minio:
- condition: service_healthy
-
- postgres:
- image: postgres:15-alpine
- environment:
- - POSTGRES_USER=iceberg
- - POSTGRES_PASSWORD=iceberg
- - POSTGRES_DB=iceberg_catalog
- volumes:
- - postgres-data:/var/lib/postgresql/data
- healthcheck:
- test: ["CMD-SHELL", "pg_isready -U iceberg -d iceberg_catalog"]
- interval: 5s
- timeout: 5s
- retries: 5
-
- minio:
- image: minio/minio:latest
- ports:
- - "9000:9000"
- - "9001:9001"
- environment:
- - MINIO_ROOT_USER=minioadmin
- - MINIO_ROOT_PASSWORD=minioadmin
- command: server /data --console-address ":9001"
- volumes:
- - minio-data:/data
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
- interval: 5s
- timeout: 5s
- retries: 5
-
- minio-init:
- image: minio/mc:latest
- depends_on:
- minio:
- condition: service_healthy
- entrypoint: >
- /bin/sh -c "
- mc alias set myminio http://minio:9000 minioadmin minioadmin;
- mc mb myminio/warehouse --ignore-existing;
- exit 0;
- "
-
-volumes:
- postgres-data:
- minio-data:
-```
-
----
-
-## Standalone Docker
-
-Run Bingsan as a standalone container (requires external PostgreSQL).
-
-### Pull and Run
-
-```bash
-# Pull the image
-docker pull ghcr.io/kimuyb/bingsan:latest
-
-# Run with environment variables
-docker run -d \
- --name bingsan \
- -p 8181:8181 \
- -e ICEBERG_DATABASE_HOST=your-postgres-host \
- -e ICEBERG_DATABASE_PORT=5432 \
- -e ICEBERG_DATABASE_USER=iceberg \
- -e ICEBERG_DATABASE_PASSWORD=your-password \
- -e ICEBERG_DATABASE_DATABASE=iceberg_catalog \
- -e ICEBERG_STORAGE_TYPE=s3 \
- -e ICEBERG_STORAGE_WAREHOUSE=s3://your-bucket/warehouse \
- -e ICEBERG_STORAGE_S3_REGION=us-east-1 \
- ghcr.io/kimuyb/bingsan:latest
-```
-
-### With Configuration File
-
-```bash
-docker run -d \
- --name bingsan \
- -p 8181:8181 \
- -v $(pwd)/config.yaml:/app/config.yaml:ro \
- ghcr.io/kimuyb/bingsan:latest
-```
-
-### Build from Source
-
-```bash
-# Build the image
-docker build -t bingsan:local .
-
-# Run
-docker run -d \
- --name bingsan \
- -p 8181:8181 \
- -e ICEBERG_DATABASE_HOST=host.docker.internal \
- bingsan:local
-```
-
----
-
-## Dockerfile
-
-```dockerfile
-FROM golang:1.25-alpine AS builder
-
-WORKDIR /app
-
-# Copy go.mod and go.sum
-COPY go.mod go.sum ./
-RUN go mod download
-
-# Copy source code
-COPY . .
-
-# Build
-RUN CGO_ENABLED=0 GOOS=linux go build -o /iceberg-catalog ./cmd/iceberg-catalog
-
-# Runtime image
-FROM alpine:3.19
-
-RUN apk --no-cache add ca-certificates wget
-
-WORKDIR /app
-
-COPY --from=builder /iceberg-catalog /app/iceberg-catalog
-
-EXPOSE 8181
-
-CMD ["/app/iceberg-catalog"]
-```
-
----
-
-## Production Docker Deployment
-
-### Recommendations
-
-1. **Use specific image tags**: Don't use `latest` in production
-2. **Resource limits**: Set memory and CPU limits
-3. **Health checks**: Configure container health checks
-4. **Logging**: Use json-file or external logging driver
-5. **Secrets**: Use Docker secrets or external secret management
-
-### Example Production Compose
-
-```yaml
-version: '3.8'
-
-services:
- bingsan:
- image: ghcr.io/kimuyb/bingsan:v1.0.0
- deploy:
- replicas: 2
- resources:
- limits:
- memory: 512M
- cpus: '1.0'
- reservations:
- memory: 128M
- cpus: '0.25'
- restart_policy:
- condition: on-failure
- delay: 5s
- max_attempts: 3
- ports:
- - "8181:8181"
- environment:
- - ICEBERG_DATABASE_HOST=postgres
- - ICEBERG_DATABASE_PASSWORD_FILE=/run/secrets/db_password
- secrets:
- - db_password
- logging:
- driver: json-file
- options:
- max-size: "100m"
- max-file: "5"
- healthcheck:
- test: ["CMD", "wget", "-q", "--spider", "http://localhost:8181/health"]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 10s
-
-secrets:
- db_password:
- external: true
-```
-
----
-
-## Networking
-
-### Bridge Network (Default)
-
-Services communicate via container names:
-
-```yaml
-services:
- bingsan:
- environment:
- - ICEBERG_DATABASE_HOST=postgres # Container name
-```
-
-### Host Network
-
-For maximum performance (Linux only):
-
-```yaml
-services:
- bingsan:
- network_mode: host
- environment:
- - ICEBERG_DATABASE_HOST=localhost
- - ICEBERG_SERVER_PORT=8181
-```
-
-### External Network
-
-Connect to existing network:
-
-```yaml
-networks:
- default:
- external: true
- name: my-network
-```
-
----
-
-## Troubleshooting
-
-### Container Won't Start
-
-```bash
-# Check logs
-docker logs bingsan
-
-# Check if port is in use
-lsof -i :8181
-```
-
-### Database Connection Failed
-
-```bash
-# Test PostgreSQL connectivity
-docker exec bingsan nc -zv postgres 5432
-
-# Check environment variables
-docker exec bingsan env | grep ICEBERG_DATABASE
-```
-
-### Health Check Failing
-
-```bash
-# Test health endpoint
-docker exec bingsan wget -q -O- http://localhost:8181/health
-
-# Test readiness endpoint
-docker exec bingsan wget -q -O- http://localhost:8181/ready
-```
diff --git a/docs-site/content/docs/deployment/kubernetes.md b/docs-site/content/docs/deployment/kubernetes.md
deleted file mode 100644
index 674dd37..0000000
--- a/docs-site/content/docs/deployment/kubernetes.md
+++ /dev/null
@@ -1,589 +0,0 @@
----
-title: "Kubernetes"
-weight: 2
----
-
-# Kubernetes Deployment
-
-Deploy Bingsan on Kubernetes for production workloads.
-
-## Prerequisites
-
-- Kubernetes 1.24+
-- kubectl configured
-- PostgreSQL database (managed or self-hosted)
-- Object storage (S3/GCS)
-
-## Quick Start
-
-### 1. Create Namespace
-
-```bash
-kubectl create namespace bingsan
-```
-
-### 2. Create Secrets
-
-```bash
-# Database credentials
-kubectl create secret generic bingsan-db \
- --namespace bingsan \
- --from-literal=host=postgres.example.com \
- --from-literal=port=5432 \
- --from-literal=user=iceberg \
- --from-literal=password=your-password \
- --from-literal=database=iceberg_catalog
-
-# S3 credentials (if using static credentials)
-kubectl create secret generic bingsan-s3 \
- --namespace bingsan \
- --from-literal=access-key-id=AKIA... \
- --from-literal=secret-access-key=...
-```
-
-### 3. Create ConfigMap
-
-```bash
-kubectl apply -f - <<EOF
-# ConfigMap with the Bingsan configuration, followed by the Deployment,
-# Service, and monitoring manifests for your environment
-EOF
-```
-
----
-
-## Troubleshooting
-
-### Check Pod Status
-
-```bash
-kubectl get pods -n bingsan
-kubectl describe pod -n bingsan bingsan-xxx
-```
-
-### View Logs
-
-```bash
-kubectl logs -n bingsan -l app=bingsan --tail=100 -f
-```
-
-### Test Connectivity
-
-```bash
-# Port forward for local testing
-kubectl port-forward -n bingsan svc/bingsan 8181:8181
-
-# Test health
-curl http://localhost:8181/health
-```
-
-### Debug Container
-
-```bash
-kubectl exec -it -n bingsan deployment/bingsan -- /bin/sh
-```
diff --git a/docs-site/content/docs/api/_index.md b/docs-site/content/docs/en/api/index.mdx
similarity index 61%
rename from docs-site/content/docs/api/_index.md
rename to docs-site/content/docs/en/api/index.mdx
index ae6a827..748cbaf 100644
--- a/docs-site/content/docs/api/_index.md
+++ b/docs-site/content/docs/en/api/index.mdx
@@ -1,7 +1,6 @@
---
-title: "API Reference"
-weight: 2
-bookCollapseSection: true
+title: API Reference
+description: Complete REST API documentation for Bingsan, compliant with the Apache Iceberg REST Catalog specification
---
# API Reference
@@ -22,7 +21,7 @@ When authentication is enabled, include the bearer token in requests:
curl -H "Authorization: Bearer " http://localhost:8181/v1/namespaces
```
-See [Authentication Configuration]({{< relref "/docs/configuration/auth" >}}) for details.
+See [Authentication Configuration](/docs/configuration/auth) for details.
## Content Type
@@ -36,21 +35,9 @@ Content-Type: application/json
### Core Operations
-- [Configuration]({{< relref "/docs/api/configuration" >}}) - Catalog configuration endpoint
-- [Namespaces]({{< relref "/docs/api/namespaces" >}}) - Namespace CRUD operations
-- [Tables]({{< relref "/docs/api/tables" >}}) - Table management and commits
-- [Views]({{< relref "/docs/api/views" >}}) - View management
-
-### Advanced Operations
-
-- [Scan Planning]({{< relref "/docs/api/scan-planning" >}}) - Server-side scan planning
-- [Transactions]({{< relref "/docs/api/transactions" >}}) - Multi-table atomic commits
-- [Events]({{< relref "/docs/api/events" >}}) - Real-time WebSocket event streaming
-
-### Operational
-
-- [Health & Metrics]({{< relref "/docs/api/health-metrics" >}}) - Health checks and Prometheus metrics
-- [OAuth]({{< relref "/docs/api/oauth" >}}) - Token exchange endpoints
+- [Namespaces](/docs/api/namespaces) - Namespace CRUD operations
+- [Tables](/docs/api/tables) - Table management and commits
+- [Views](/docs/api/views) - View management
## Error Responses
diff --git a/docs-site/content/docs/en/api/meta.json b/docs-site/content/docs/en/api/meta.json
new file mode 100644
index 0000000..56c2c80
--- /dev/null
+++ b/docs-site/content/docs/en/api/meta.json
@@ -0,0 +1,9 @@
+{
+ "title": "API Reference",
+ "pages": [
+ "index",
+ "namespaces",
+ "tables",
+ "views"
+ ]
+}
diff --git a/docs-site/content/docs/api/namespaces.md b/docs-site/content/docs/en/api/namespaces.mdx
similarity index 87%
rename from docs-site/content/docs/api/namespaces.md
rename to docs-site/content/docs/en/api/namespaces.mdx
index 85b3da4..7325070 100644
--- a/docs-site/content/docs/api/namespaces.md
+++ b/docs-site/content/docs/en/api/namespaces.mdx
@@ -1,6 +1,6 @@
---
-title: "Namespaces"
-weight: 2
+title: Namespaces API
+description: Namespace CRUD operations for managing data containers
---
# Namespaces API
@@ -23,7 +23,7 @@ GET /v1/namespaces?pageToken={token}&pageSize={size}
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
-| `parent` | string | No | Parent namespace to filter children (URL-encoded, e.g., `analytics%00events`) |
+| `parent` | string | No | Parent namespace to filter children |
| `pageToken` | string | No | Pagination token from previous response |
| `pageSize` | integer | No | Maximum number of results (default: 100) |
@@ -129,7 +129,7 @@ GET /v1/namespaces/{namespace}
| Parameter | Type | Description |
|-----------|------|-------------|
-| `namespace` | string | URL-encoded namespace (use `%1F` for nested, e.g., `analytics%1Fevents`) |
+| `namespace` | string | URL-encoded namespace |
### Response
@@ -224,14 +224,6 @@ POST /v1/namespaces/{namespace}/properties
}
```
-### Response Fields
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `updated` | array[string] | Properties that were updated |
-| `removed` | array[string] | Properties that were removed |
-| `missing` | array[string] | Properties in `removals` that didn't exist |
-
### Example
```bash
@@ -272,5 +264,6 @@ DELETE /v1/namespaces/{namespace}
curl -X DELETE http://localhost:8181/v1/namespaces/analytics
```
-> [!WARNING]
-> You must delete all tables and views in a namespace before deleting the namespace itself.
+
+You must delete all tables and views in a namespace before deleting the namespace itself.
+
diff --git a/docs-site/content/docs/api/tables.md b/docs-site/content/docs/en/api/tables.mdx
similarity index 72%
rename from docs-site/content/docs/api/tables.md
rename to docs-site/content/docs/en/api/tables.mdx
index 9af1dc5..c48a74c 100644
--- a/docs-site/content/docs/api/tables.md
+++ b/docs-site/content/docs/en/api/tables.mdx
@@ -1,6 +1,6 @@
---
-title: "Tables"
-weight: 3
+title: Tables API
+description: Table management operations including CRUD and commits
---
# Tables API
@@ -84,13 +84,6 @@ POST /v1/namespaces/{namespace}/tables
{"source-id": 4, "field-id": 1000, "name": "event_day", "transform": "day"}
]
},
- "write-order": {
- "order-id": 0,
- "fields": [
- {"source-id": 4, "direction": "asc", "null-order": "nulls-last"}
- ]
- },
- "stage-create": false,
"properties": {
"format-version": "2",
"write.parquet.compression-codec": "zstd"
@@ -122,18 +115,10 @@ Returns the full table metadata:
"table-uuid": "550e8400-e29b-41d4-a716-446655440000",
"location": "s3://bucket/warehouse/analytics/user_events",
"last-updated-ms": 1705312200000,
- "schema": { ... },
+ "schema": { "..." },
"current-schema-id": 0,
- "schemas": [ ... ],
- "partition-spec": [ ... ],
- "default-spec-id": 0,
- "partition-specs": [ ... ],
- "last-partition-id": 1000,
- "properties": { ... },
- "current-snapshot-id": -1,
- "snapshots": [],
- "sort-orders": [ ... ],
- "default-sort-order-id": 0
+ "partition-spec": [],
+ "properties": {}
}
}
```
@@ -184,19 +169,6 @@ GET /v1/namespaces/{namespace}/tables/{table}?snapshots={snapshots}
"format-version": 2,
"table-uuid": "550e8400-e29b-41d4-a716-446655440000",
"location": "s3://bucket/warehouse/analytics/events",
- "last-updated-ms": 1705312200000,
- "schema": {
- "type": "struct",
- "schema-id": 0,
- "fields": [
- {"id": 1, "name": "id", "required": true, "type": "long"},
- {"id": 2, "name": "data", "required": false, "type": "string"}
- ]
- },
- "current-schema-id": 0,
- "partition-spec": [],
- "default-spec-id": 0,
- "properties": {},
"current-snapshot-id": 123456789,
"snapshots": [
{
@@ -206,15 +178,9 @@ GET /v1/namespaces/{namespace}/tables/{table}?snapshots={snapshots}
"operation": "append",
"added-data-files": "10",
"added-records": "1000"
- },
- "manifest-list": "s3://bucket/.../snap-123456789-uuid.avro"
+ }
}
- ],
- "snapshot-log": [ ... ],
- "metadata-log": [ ... ]
- },
- "config": {
- "client.factory": "org.apache.iceberg.aws.glue.GlueCatalog"
+ ]
}
}
```
@@ -285,8 +251,7 @@ POST /v1/namespaces/{namespace}/tables/{table}
]
}
},
- {"action": "set-current-schema", "schema-id": 1},
- {"action": "set-properties", "updates": {"owner": "new-team"}}
+ {"action": "set-current-schema", "schema-id": 1}
]
}
```
@@ -298,36 +263,24 @@ POST /v1/namespaces/{namespace}/tables/{table}
| `assert-create` | - | Assert table doesn't exist |
| `assert-table-uuid` | `uuid` | Assert table UUID matches |
| `assert-ref-snapshot-id` | `ref`, `snapshot-id` | Assert ref points to snapshot |
-| `assert-last-assigned-field-id` | `last-assigned-field-id` | Assert last field ID |
| `assert-current-schema-id` | `current-schema-id` | Assert current schema ID |
-| `assert-last-assigned-partition-id` | `last-assigned-partition-id` | Assert last partition ID |
| `assert-default-spec-id` | `default-spec-id` | Assert default partition spec |
-| `assert-default-sort-order-id` | `default-sort-order-id` | Assert default sort order |
### Update Actions
| Action | Fields | Description |
|--------|--------|-------------|
-| `assign-uuid` | `uuid` | Assign table UUID |
| `upgrade-format-version` | `format-version` | Upgrade format version |
| `add-schema` | `schema` | Add new schema |
| `set-current-schema` | `schema-id` | Set current schema |
| `add-partition-spec` | `spec` | Add partition spec |
| `set-default-spec` | `spec-id` | Set default partition spec |
-| `add-sort-order` | `sort-order` | Add sort order |
-| `set-default-sort-order` | `sort-order-id` | Set default sort order |
| `add-snapshot` | `snapshot` | Add snapshot |
| `set-snapshot-ref` | `ref-name`, `type`, `snapshot-id` | Set snapshot reference |
| `remove-snapshots` | `snapshot-ids` | Remove snapshots |
-| `remove-snapshot-ref` | `ref-name` | Remove snapshot reference |
-| `set-location` | `location` | Set table location |
| `set-properties` | `updates` | Update properties |
| `remove-properties` | `removals` | Remove properties |
-### Response
-
-Returns updated metadata (same format as Load Table).
-
### Errors
| Code | Error | Description |
@@ -437,77 +390,3 @@ curl -X POST http://localhost:8181/v1/tables/rename \
"destination": {"namespace": ["analytics"], "name": "user_events"}
}'
```
-
----
-
-## Report Metrics
-
-Report table-level metrics (read/write statistics).
-
-### Request
-
-```http
-POST /v1/namespaces/{namespace}/tables/{table}/metrics
-```
-
-### Request Body
-
-```json
-{
- "report-type": "scan",
- "table-name": "analytics.events",
- "snapshot-id": 123456789,
- "filter": "event_time > '2024-01-01'",
- "schema-id": 0,
- "projected-field-ids": [1, 2, 3],
- "projected-field-names": ["id", "data", "created_at"],
- "metrics": {
- "total-planning-duration": 150,
- "total-data-manifests": 10,
- "total-delete-manifests": 2,
- "scanned-data-manifests": 5,
- "skipped-data-manifests": 5,
- "total-file-size-in-bytes": 1073741824,
- "total-data-files": 100,
- "total-delete-files": 10
- }
-}
-```
-
-### Response
-
-- **204 No Content**: Metrics accepted
-
----
-
-## Load Credentials
-
-Get temporary credentials for accessing table data.
-
-### Request
-
-```http
-GET /v1/namespaces/{namespace}/tables/{table}/credentials
-```
-
-### Headers
-
-| Header | Description |
-|--------|-------------|
-| `X-Iceberg-Access-Delegation` | Credential type (e.g., `vended-credentials`) |
-
-### Response
-
-```json
-{
- "config": {
- "s3.access-key-id": "ASIA...",
- "s3.secret-access-key": "...",
- "s3.session-token": "...",
- "s3.region": "us-east-1"
- }
-}
-```
-
-> [!NOTE]
-> Credentials are vended based on storage configuration. Requires appropriate IAM roles and policies.
diff --git a/docs-site/content/docs/api/views.md b/docs-site/content/docs/en/api/views.mdx
similarity index 76%
rename from docs-site/content/docs/api/views.md
rename to docs-site/content/docs/en/api/views.mdx
index c0cba51..e69b9a9 100644
--- a/docs-site/content/docs/api/views.md
+++ b/docs-site/content/docs/en/api/views.mdx
@@ -1,6 +1,6 @@
---
-title: "Views"
-weight: 4
+title: Views API
+description: View management operations for named SQL queries
---
# Views API
@@ -60,7 +60,6 @@ POST /v1/namespaces/{namespace}/views
```json
{
"name": "daily_events",
- "location": "s3://bucket/warehouse/analytics/views/daily_events",
"schema": {
"type": "struct",
"schema-id": 0,
@@ -89,8 +88,7 @@ POST /v1/namespaces/{namespace}/views
"default-namespace": ["analytics"]
},
"properties": {
- "owner": "data-team",
- "description": "Daily event aggregation"
+ "owner": "data-team"
}
}
```
@@ -117,10 +115,6 @@ POST /v1/namespaces/{namespace}/views
| `default-catalog` | string | No | Default catalog for unqualified names |
| `default-namespace` | array | No | Default namespace |
-### Response
-
-Returns the full view metadata.
-
### Example
```bash
@@ -167,31 +161,20 @@ GET /v1/namespaces/{namespace}/views/{view}
```json
{
- "metadata-location": "s3://bucket/warehouse/analytics/views/daily_events/metadata/00001-uuid.metadata.json",
+ "metadata-location": "s3://bucket/warehouse/views/daily_events/metadata/00001-uuid.metadata.json",
"metadata": {
"view-uuid": "550e8400-e29b-41d4-a716-446655440000",
"format-version": 1,
- "location": "s3://bucket/warehouse/analytics/views/daily_events",
- "schemas": [
- {
- "type": "struct",
- "schema-id": 0,
- "fields": [...]
- }
- ],
+ "location": "s3://bucket/warehouse/views/daily_events",
"current-version-id": 1,
"versions": [
{
"version-id": 1,
"schema-id": 0,
"timestamp-ms": 1705312200000,
- "summary": {...},
"representations": [...]
}
],
- "version-log": [
- {"version-id": 1, "timestamp-ms": 1705312200000}
- ],
"properties": {
"owner": "data-team"
}
@@ -240,51 +223,6 @@ Replace a view's definition with a new version.
POST /v1/namespaces/{namespace}/views/{view}
```
-### Request Body
-
-```json
-{
- "identifier": {
- "namespace": ["analytics"],
- "name": "daily_events"
- },
- "requirements": [
- {"type": "assert-view-uuid", "uuid": "550e8400-e29b-41d4-a716-446655440000"}
- ],
- "updates": [
- {
- "action": "add-schema",
- "schema": {
- "type": "struct",
- "schema-id": 1,
- "fields": [
- {"id": 1, "name": "event_date", "required": true, "type": "date"},
- {"id": 2, "name": "event_count", "required": true, "type": "long"},
- {"id": 3, "name": "unique_users", "required": true, "type": "long"},
- {"id": 4, "name": "avg_events_per_user", "required": true, "type": "double"}
- ]
- }
- },
- {
- "action": "add-view-version",
- "view-version": {
- "version-id": 2,
- "schema-id": 1,
- "timestamp-ms": 1705398600000,
- "representations": [
- {
- "type": "sql",
- "sql": "SELECT DATE(event_time) AS event_date, COUNT(*) AS event_count, COUNT(DISTINCT user_id) AS unique_users, COUNT(*) / COUNT(DISTINCT user_id) AS avg_events_per_user FROM analytics.user_events GROUP BY DATE(event_time)",
- "dialect": "spark"
- }
- ]
- }
- },
- {"action": "set-current-view-version", "view-version-id": 2}
- ]
-}
-```
-
### Requirements
| Type | Fields | Description |
@@ -303,10 +241,6 @@ POST /v1/namespaces/{namespace}/views/{view}
| `set-properties` | `updates` | Update properties |
| `remove-properties` | `removals` | Remove properties |
-### Response
-
-Returns updated view metadata.
-
### Errors
| Code | Error | Description |
@@ -363,10 +297,6 @@ POST /v1/views/rename
}
```
-### Response
-
-- **200 OK**: View renamed
-
### Example
```bash
diff --git a/docs-site/content/docs/architecture/data-model.md b/docs-site/content/docs/en/architecture/data-model.mdx
similarity index 53%
rename from docs-site/content/docs/architecture/data-model.md
rename to docs-site/content/docs/en/architecture/data-model.mdx
index 9273e03..c8c1c8c 100644
--- a/docs-site/content/docs/architecture/data-model.md
+++ b/docs-site/content/docs/en/architecture/data-model.mdx
@@ -1,6 +1,6 @@
---
-title: "Data Model"
-weight: 2
+title: Data Model
+description: Database schema and metadata storage in Bingsan
---
# Data Model
@@ -39,23 +39,11 @@ Stores namespace metadata.
| Column | Type | Description |
|--------|------|-------------|
| `id` | BIGSERIAL | Primary key |
-| `name` | TEXT[] | Namespace name as array (e.g., `{analytics,events}`) |
+| `name` | TEXT[] | Namespace name as array |
| `properties` | JSONB | Namespace properties |
| `created_at` | TIMESTAMPTZ | Creation timestamp |
| `updated_at` | TIMESTAMPTZ | Last update timestamp |
-```sql
-CREATE TABLE namespaces (
- id BIGSERIAL PRIMARY KEY,
- name TEXT[] NOT NULL UNIQUE,
- properties JSONB NOT NULL DEFAULT '{}',
- created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
- updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
-);
-
-CREATE INDEX idx_namespaces_name ON namespaces USING GIN (name);
-```
-
### tables
Stores Iceberg table metadata.
@@ -71,51 +59,9 @@ Stores Iceberg table metadata.
| `created_at` | TIMESTAMPTZ | Creation timestamp |
| `updated_at` | TIMESTAMPTZ | Last update timestamp |
-```sql
-CREATE TABLE tables (
- id BIGSERIAL PRIMARY KEY,
- namespace_id BIGINT NOT NULL REFERENCES namespaces(id),
- name TEXT NOT NULL,
- table_uuid UUID NOT NULL,
- metadata_location TEXT NOT NULL,
- metadata JSONB,
- created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
- updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
- UNIQUE (namespace_id, name)
-);
-
-CREATE INDEX idx_tables_namespace ON tables (namespace_id);
-CREATE INDEX idx_tables_uuid ON tables (table_uuid);
-```
-
### views
-Stores Iceberg view metadata.
-
-| Column | Type | Description |
-|--------|------|-------------|
-| `id` | BIGSERIAL | Primary key |
-| `namespace_id` | BIGINT | Foreign key to namespaces |
-| `name` | TEXT | View name |
-| `view_uuid` | UUID | Iceberg view UUID |
-| `metadata_location` | TEXT | Path to current metadata file |
-| `metadata` | JSONB | Cached view metadata |
-| `created_at` | TIMESTAMPTZ | Creation timestamp |
-| `updated_at` | TIMESTAMPTZ | Last update timestamp |
-
-```sql
-CREATE TABLE views (
- id BIGSERIAL PRIMARY KEY,
- namespace_id BIGINT NOT NULL REFERENCES namespaces(id),
- name TEXT NOT NULL,
- view_uuid UUID NOT NULL,
- metadata_location TEXT NOT NULL,
- metadata JSONB,
- created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
- updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
- UNIQUE (namespace_id, name)
-);
-```
+Stores Iceberg view metadata; the structure mirrors the `tables` table.
### scan_plans
@@ -129,28 +75,6 @@ Stores scan plan state for server-side planning.
| `status` | TEXT | Plan status |
| `request` | JSONB | Original plan request |
| `tasks` | JSONB | Computed scan tasks |
-| `statistics` | JSONB | Planning statistics |
-| `created_at` | TIMESTAMPTZ | Creation timestamp |
-| `completed_at` | TIMESTAMPTZ | Completion timestamp |
-
-```sql
-CREATE TABLE scan_plans (
- id BIGSERIAL PRIMARY KEY,
- plan_id UUID NOT NULL UNIQUE,
- table_id BIGINT NOT NULL REFERENCES tables(id),
- status TEXT NOT NULL DEFAULT 'submitted',
- request JSONB NOT NULL,
- tasks JSONB,
- statistics JSONB,
- created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
- completed_at TIMESTAMPTZ
-);
-
-CREATE INDEX idx_scan_plans_table ON scan_plans (table_id);
-CREATE INDEX idx_scan_plans_status ON scan_plans (status);
-```
-
----
## Metadata Storage Strategy
@@ -172,22 +96,7 @@ Bingsan uses a hybrid approach:
### Metadata Caching
-Table metadata can be cached in PostgreSQL for faster reads:
-
-```json
-{
- "format-version": 2,
- "table-uuid": "...",
- "location": "s3://bucket/table",
- "schemas": [...],
- "current-schema-id": 0,
- ...
-}
-```
-
-This avoids reading from object storage for every request.
-
----
+Table metadata can be cached in PostgreSQL for faster reads, avoiding a round trip to object storage on every request.
## Locking Model
@@ -196,51 +105,22 @@ This avoids reading from object storage for every request.
PostgreSQL advisory locks ensure consistency:
```sql
--- Namespace-level lock (for CRUD)
+-- Namespace-level lock
SELECT pg_advisory_lock(hashtext('ns:' || $1));
--- Table-level lock (for commits)
-SELECT pg_advisory_xact_lock($1); -- table ID
-```
-
-### Lock Hierarchy
-
-```
-Namespace Lock
- └── Table Lock (for commits within namespace)
+-- Table-level lock
+SELECT pg_advisory_xact_lock($1);
```
Locks are held for the minimum necessary duration.
----
-
## Migrations
Bingsan uses [golang-migrate](https://github.com/golang-migrate/migrate) for schema migrations.
-### Migration Files
-
-Located in `internal/db/migrations/`:
-
-```
-001_create_namespaces.up.sql
-001_create_namespaces.down.sql
-002_create_tables.up.sql
-002_create_tables.down.sql
-...
-```
-
### Automatic Migrations
-Migrations run automatically on startup:
-
-```go
-m, _ := migrate.New(
- "file://migrations",
- databaseURL,
-)
-m.Up()
-```
+Migrations run automatically on startup.
### Manual Migrations
@@ -255,13 +135,9 @@ migrate -database "postgres://..." -path migrations up
migrate -database "postgres://..." -path migrations down 1
```
----
-
## Indexes
-### Query Patterns
-
-Indexes are optimized for common queries:
+Indexes are optimized for common query patterns:
| Query | Index Used |
|-------|------------|
@@ -271,14 +147,6 @@ Indexes are optimized for common queries:
| Get table by name | `tables_namespace_id_name_key` (UNIQUE) |
| Find table by UUID | `idx_tables_uuid` |
-### Performance Considerations
-
-- GIN index on `namespaces.name` supports prefix queries
-- Composite unique constraints double as indexes
-- UUID index enables fast lookup by Iceberg table UUID
-
----
-
## Data Lifecycle
### Namespace
@@ -293,19 +161,6 @@ Indexes are optimized for common queries:
2. **Commit**: Update `metadata_location`, optional metadata cache
3. **Drop**: Delete from `tables`, optionally purge storage
-### View
-
-Similar to tables, but stores view-specific metadata.
-
-### Scan Plan
-
-1. **Submit**: Insert into `scan_plans` with status `submitted`
-2. **Planning**: Update status to `planning`, compute tasks
-3. **Complete**: Update status to `complete`, store tasks
-4. **Cleanup**: Delete expired plans (background job)
-
----
-
## Backup and Recovery
### PostgreSQL Backup
@@ -318,16 +173,4 @@ pg_dump -h localhost -U iceberg iceberg_catalog > backup.sql
psql -h localhost -U iceberg iceberg_catalog < backup.sql
```
-### Point-in-Time Recovery
-
-Enable WAL archiving for PITR:
-
-```sql
-ALTER SYSTEM SET wal_level = replica;
-ALTER SYSTEM SET archive_mode = on;
-ALTER SYSTEM SET archive_command = 'cp %p /archive/%f';
-```
-
-### Metadata Consistency
-
Since full Iceberg metadata is in object storage, the database can be rebuilt from metadata files if needed.
diff --git a/docs-site/content/docs/architecture/_index.md b/docs-site/content/docs/en/architecture/index.mdx
similarity index 88%
rename from docs-site/content/docs/architecture/_index.md
rename to docs-site/content/docs/en/architecture/index.mdx
index 850b26a..6ef906d 100644
--- a/docs-site/content/docs/architecture/_index.md
+++ b/docs-site/content/docs/en/architecture/index.mdx
@@ -1,7 +1,6 @@
---
-title: "Architecture"
-weight: 4
-bookCollapseSection: true
+title: Architecture
+description: Understanding Bingsan's architecture for deployment and tuning
---
# Architecture
@@ -93,7 +92,7 @@ PostgreSQL row-level locking with configurable timeouts prevents concurrent modi
- Automatic retry with exponential backoff
- Handles lock conflicts gracefully
-See [Distributed Locking]({{< relref "/docs/performance/locking" >}}) for configuration details.
+See [Distributed Locking](/docs/performance/locking) for configuration details.
### Object Pooling
@@ -103,10 +102,10 @@ Memory optimization through buffer reuse:
- Reduces GC pressure under high load
- Prometheus metrics for pool health
-See [Object Pooling]({{< relref "/docs/performance/pooling" >}}) for implementation details.
+See [Object Pooling](/docs/performance/pooling) for implementation details.
## Sections
-- [Request Flow]({{< relref "/docs/architecture/request-flow" >}}) - How requests are processed
-- [Data Model]({{< relref "/docs/architecture/data-model" >}}) - Database schema and metadata storage
-- [Scalability]({{< relref "/docs/architecture/scalability" >}}) - Scaling strategies and limits
+- [Request Flow](/docs/architecture/request-flow) - How requests are processed
+- [Data Model](/docs/architecture/data-model) - Database schema and metadata storage
+- [Scalability](/docs/architecture/scalability) - Scaling strategies and limits
diff --git a/docs-site/content/docs/en/architecture/meta.json b/docs-site/content/docs/en/architecture/meta.json
new file mode 100644
index 0000000..e8a192e
--- /dev/null
+++ b/docs-site/content/docs/en/architecture/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "Architecture",
+ "pages": ["index", "request-flow", "data-model", "scalability"]
+}
diff --git a/docs-site/content/docs/architecture/request-flow.md b/docs-site/content/docs/en/architecture/request-flow.mdx
similarity index 54%
rename from docs-site/content/docs/architecture/request-flow.md
rename to docs-site/content/docs/en/architecture/request-flow.mdx
index d08a698..cd1ac04 100644
--- a/docs-site/content/docs/architecture/request-flow.md
+++ b/docs-site/content/docs/en/architecture/request-flow.mdx
@@ -1,6 +1,6 @@
---
-title: "Request Flow"
-weight: 1
+title: Request Flow
+description: How Bingsan processes HTTP requests
---
# Request Flow
@@ -42,28 +42,15 @@ Understanding how Bingsan processes requests helps with debugging and performanc
### 1. Request ID
-Assigns a unique ID to each request for tracing:
-
-```go
-c.Locals("requestId", uuid.New().String())
-```
-
-Returned in `X-Request-ID` header.
+Assigns a unique ID to each request for tracing. Returned in the `X-Request-ID` header.
### 2. Recovery
-Catches panics and returns 500 Internal Server Error:
-
-```go
-recover.New(recover.Config{
- EnableStackTrace: config.Server.Debug,
-})
-```
+Catches panics and returns 500 Internal Server Error.
### 3. Prometheus Metrics
Records request metrics:
-
- `iceberg_catalog_http_requests_total` - Counter
- `iceberg_catalog_http_request_duration_seconds` - Histogram
@@ -71,45 +58,15 @@ Paths `/health`, `/ready`, `/metrics` are excluded.
### 4. CORS
-Allows cross-origin requests:
-
-```go
-cors.New(cors.Config{
- AllowOrigins: "*",
- AllowMethods: "GET,POST,PUT,DELETE,HEAD,OPTIONS",
- AllowHeaders: "Origin,Content-Type,Accept,Authorization,X-Iceberg-Access-Delegation",
-})
-```
+Allows cross-origin requests with configurable origins and methods.
### 5. Logger
-Logs request details:
-
-```json
-{
- "level": "INFO",
- "msg": "request",
- "method": "GET",
- "path": "/v1/namespaces",
- "status": 200,
- "latency": "1.234ms",
- "request_id": "abc-123"
-}
-```
+Logs request details including method, path, status, latency, and request ID.
### 6. Authentication
-When enabled, validates bearer tokens:
-
-```go
-if config.Auth.Enabled {
- v1.Use(middleware.Auth(config, db))
-}
-```
-
-Returns 401 if token is invalid or missing.
-
----
+When enabled, validates bearer tokens. Returns 401 if the token is invalid or missing.
## Table Commit Flow
@@ -137,108 +94,24 @@ The most complex operation is a table commit:
│ 11. Release lock │
│ 12. Publish event │
└──────────────────────────────────────────────┘
- │
- ▼
-┌──────────────┐
-│ Client │
-└──────────────┘
```
### Lock Acquisition
-```go
-// Acquire advisory lock on table
-_, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", tableID)
+```sql
+-- Acquire advisory lock on table
+SELECT pg_advisory_xact_lock($1);
```
The lock is held for the duration of the transaction.
### Requirement Checking
-Each requirement is validated:
-
-```go
-switch req.Type {
-case "assert-table-uuid":
- if current.UUID != req.UUID {
- return CommitFailedError("UUID mismatch")
- }
-case "assert-current-schema-id":
- if current.SchemaID != req.SchemaID {
- return CommitFailedError("Schema ID mismatch")
- }
-// ... other requirements
-}
-```
-
-If any requirement fails, the entire commit is rejected.
+Each requirement is validated against the current table state. If any requirement fails, the entire commit is rejected.
### Update Application
-Updates are applied in order:
-
-```go
-for _, update := range updates {
- switch update.Action {
- case "add-snapshot":
- metadata.AddSnapshot(update.Snapshot)
- case "set-current-schema":
- metadata.SetCurrentSchemaID(update.SchemaID)
- // ... other actions
- }
-}
-```
-
----
-
-## Transaction Commit Flow
-
-Multi-table transactions have additional complexity:
-
-```
-┌──────────────┐
-│ Client │
-└──────┬───────┘
- │ POST /v1/transactions/commit
- ▼
-┌──────────────────────────────────────────────┐
-│ CommitTransaction Handler │
-├──────────────────────────────────────────────┤
-│ 1. Parse all table changes │
-│ 2. Sort tables by ID (deadlock prevention) │
-│ 3. Begin SERIALIZABLE transaction │
-│ 4. For each table: │
-│ a. Acquire lock │
-│ b. Check requirements │
-│ c. Apply updates │
-│ 5. Write all metadata files │
-│ 6. Update all database records │
-│ 7. Commit transaction │
-│ 8. Publish events │
-└──────────────────────────────────────────────┘
-```
-
-Tables are locked in consistent order to prevent deadlocks.
-
----
-
-## Event Publishing
-
-After successful operations, events are published:
-
-```go
-event := events.Event{
- Type: "table_updated",
- Timestamp: time.Now(),
- Namespace: namespace,
- Table: table,
-}
-broker.Publish(event)
-```
-
-Events are delivered to all connected WebSocket clients.
-
----
+Updates are applied in order and support operations such as `add-snapshot` and `set-current-schema`.
## Error Handling
@@ -266,39 +139,19 @@ Events are delivered to all connected WebSocket clients.
}
```
----
-
## Performance Considerations
### Connection Pooling
-Database connections are pooled:
-
-```go
-poolConfig.MaxConns = config.Database.MaxOpenConns
-poolConfig.MinConns = config.Database.MaxIdleConns
-```
+Database connections are pooled via pgx/v5 with configurable max/min connections.
### Goroutine-per-Request
-Each request runs in its own goroutine:
-
-- No thread pool limits
-- Automatic scheduling
-- Efficient memory usage
+Each request runs in its own goroutine with automatic scheduling and efficient memory usage.
### JSON Serialization
-Uses [goccy/go-json](https://github.com/goccy/go-json) for faster JSON:
-
-```go
-fiber.Config{
- JSONEncoder: json.Marshal,
- JSONDecoder: json.Unmarshal,
-}
-```
-
----
+Uses [goccy/go-json](https://github.com/goccy/go-json) for faster JSON encoding/decoding.
## Debugging
@@ -314,16 +167,8 @@ grep "request_id.*abc-123" /var/log/bingsan.log
Check the request duration in metrics:
-```promql
+```text
histogram_quantile(0.99,
rate(iceberg_catalog_http_request_duration_seconds_bucket{path="/v1/namespaces/{namespace}/tables/{table}"}[5m])
)
```
-
-### Database Queries
-
-Enable PostgreSQL query logging for slow query analysis:
-
-```sql
-ALTER SYSTEM SET log_min_duration_statement = 100;
-```
diff --git a/docs-site/content/docs/architecture/scalability.md b/docs-site/content/docs/en/architecture/scalability.mdx
similarity index 63%
rename from docs-site/content/docs/architecture/scalability.md
rename to docs-site/content/docs/en/architecture/scalability.mdx
index 618a010..7395eb1 100644
--- a/docs-site/content/docs/architecture/scalability.md
+++ b/docs-site/content/docs/en/architecture/scalability.mdx
@@ -1,6 +1,6 @@
---
-title: "Scalability"
-weight: 3
+title: Scalability
+description: Scaling strategies and capacity planning for Bingsan
---
# Scalability
@@ -61,8 +61,6 @@ spec:
averageUtilization: 70
```
----
-
## PostgreSQL Scaling
### Connection Pool Sizing
@@ -75,44 +73,10 @@ total_connections = max_open_conns × num_instances
Example: 25 connections × 10 instances = 250 connections
-Ensure PostgreSQL can handle the load:
-
-```sql
-ALTER SYSTEM SET max_connections = 500;
-```
-
-### Read Replicas
-
-For read-heavy workloads, use PostgreSQL read replicas:
-
-```yaml
-database:
- host: primary.postgres.internal # Writes
- read_host: replica.postgres.internal # Reads (future feature)
-```
-
### Connection Pooling (PgBouncer)
For many instances, use PgBouncer:
-```
-┌─────────┐ ┌─────────┐ ┌─────────┐
-│ Node 1 │ │ Node 2 │ │ Node N │
-└────┬────┘ └────┬────┘ └────┬────┘
- │ │ │
- └────────────┼────────────┘
- ▼
- ┌─────────────┐
- │ PgBouncer │
- └──────┬──────┘
- ▼
- ┌───────────────┐
- │ PostgreSQL │
- └───────────────┘
-```
-
-Configure PgBouncer for transaction pooling:
-
```ini
[databases]
iceberg_catalog = host=postgres port=5432 dbname=iceberg_catalog
@@ -123,8 +87,6 @@ max_client_conn = 1000
default_pool_size = 50
```
----
-
## Performance Characteristics
### Latency
@@ -159,8 +121,6 @@ Per instance:
| CPU | 0.2 cores | 1 core |
| Goroutines | 100-500 | 2,000 |
----
-
## Bottlenecks and Solutions
### PostgreSQL Connections
@@ -181,17 +141,6 @@ Per instance:
- Reduce write frequency to same table
- Partition workloads across tables
-### Network Latency
-
-**Symptom**: High latency despite low server load
-
-**Solutions**:
-- Deploy closer to PostgreSQL
-- Use connection pooling
-- Enable keep-alives
-
----
-
## Capacity Planning
### Estimating Instances
@@ -204,7 +153,7 @@ Example: 10,000 RPS with 5,000 RPS/instance = 3 instances × 1.5 = 5 instances
### Database Sizing
-Metadata size per table: ~10-50 KB (varies with schema complexity)
+Metadata size per table: ~10-50 KB
```
database_size ≈ num_tables × 30 KB + num_namespaces × 1 KB
@@ -212,16 +161,6 @@ database_size ≈ num_tables × 30 KB + num_namespaces × 1 KB
10,000 tables ≈ 300 MB database
-### Memory
-
-```
-memory_per_instance = base_memory + (concurrent_requests × request_memory)
- ≈ 50 MB + (500 × 100 KB)
- ≈ 100 MB
-```
-
----
-
## High Availability
### Multiple Instances
@@ -238,20 +177,6 @@ spec:
maxSurge: 1
```
-### Pod Disruption Budget
-
-```yaml
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
- name: bingsan
-spec:
- minAvailable: 2
- selector:
- matchLabels:
- app: bingsan
-```
-
### PostgreSQL HA
Use managed PostgreSQL with automatic failover:
@@ -261,25 +186,11 @@ Use managed PostgreSQL with automatic failover:
Or deploy with Patroni/Stolon for self-managed HA.
----
-
## Multi-Region
### Active-Passive
-Single primary region, standby in secondary:
-
-```
-Region A (Active) Region B (Passive)
-┌─────────┐ ┌─────────┐
-│ Bingsan │ │ Bingsan │ (standby)
-└────┬────┘ └────┬────┘
- │ │
-┌────▼────┐ ┌────▼────┐
-│ Primary │ ──replication──▶ │ Replica │
-│ DB │ │ DB │
-└─────────┘ └─────────┘
-```
+Single primary region, standby in secondary region with PostgreSQL replication.
### Active-Active (Sharded)
@@ -293,13 +204,11 @@ staging.* → Region C
Each region has its own database and Bingsan cluster.
----
-
## Monitoring for Scale
### Key Metrics
-```promql
+```text
# Request rate per instance
sum(rate(iceberg_catalog_http_requests_total[5m])) by (instance)
@@ -308,9 +217,6 @@ iceberg_db_connections_in_use / iceberg_db_connections_max
# Lock wait time
rate(iceberg_db_wait_duration_seconds_total[5m])
-
-# Request queue depth
-iceberg_catalog_http_requests_in_flight
```
### Scaling Triggers
@@ -319,16 +225,3 @@ Auto-scale based on:
- CPU > 70%
- Request latency p99 > 100ms
- Request queue > 100
-
-### Capacity Alerts
-
-```yaml
-- alert: BingsanApproachingCapacity
- expr: |
- sum(rate(iceberg_catalog_http_requests_total[5m]))
- / (count(up{job="bingsan"}) * 5000) > 0.8
- labels:
- severity: warning
- annotations:
- summary: "Bingsan cluster approaching capacity"
-```
diff --git a/docs-site/content/docs/en/configuration/auth.mdx b/docs-site/content/docs/en/configuration/auth.mdx
new file mode 100644
index 0000000..8d718ff
--- /dev/null
+++ b/docs-site/content/docs/en/configuration/auth.mdx
@@ -0,0 +1,155 @@
+---
+title: Authentication
+description: Configure authentication and authorization for Bingsan
+---
+
+# Authentication Configuration
+
+Configure authentication and authorization for the catalog API.
+
+## Options
+
+```yaml
+auth:
+ enabled: false
+ token_expiry: 1h
+ signing_key: "change-me-in-production"
+
+ oauth2:
+ enabled: false
+ issuer: ""
+ client_id: ""
+ client_secret: ""
+
+ api_key:
+ enabled: false
+```
+
+## Reference
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `enabled` | boolean | `false` | Enable authentication |
+| `token_expiry` | duration | `1h` | Access token lifetime |
+| `signing_key` | string | - | Secret key for signing tokens |
+| `oauth2.enabled` | boolean | `false` | Enable OAuth2 endpoint |
+| `oauth2.issuer` | string | `""` | External OAuth issuer URL |
+| `api_key.enabled` | boolean | `false` | Enable API key authentication |
+
+## Enabling Authentication
+
+```yaml
+auth:
+ enabled: true
+ token_expiry: 1h
+ signing_key: "your-secure-256-bit-secret-key-here"
+```
+
+
+Always change the signing_key in production. Use a cryptographically secure random string.
+
+
+Generate a secure key:
+
+```bash
+openssl rand -hex 32
+```
+
+## OAuth2 Token Exchange
+
+Enable the OAuth2 token endpoint for Iceberg clients:
+
+```yaml
+auth:
+ enabled: true
+ signing_key: "your-secure-key"
+
+ oauth2:
+ enabled: true
+```
+
+Clients exchange credentials for tokens:
+
+```bash
+curl -X POST http://localhost:8181/v1/oauth/tokens \
+ -H "Content-Type: application/x-www-form-urlencoded" \
+ -d "grant_type=client_credentials" \
+ -d "client_id=my-client" \
+ -d "client_secret=my-secret"
+```
+
+Response:
+
+```json
+{
+ "access_token": "eyJhbGciOiJIUzI1NiIs...",
+ "token_type": "bearer",
+ "expires_in": 3600
+}
+```
+
+## External OAuth Provider
+
+Use an external OAuth/OIDC provider:
+
+```yaml
+auth:
+ enabled: true
+
+ oauth2:
+ enabled: true
+ issuer: "https://your-idp.example.com"
+```
+
+### Supported Providers
+
+- **Auth0**: `issuer: "https://your-tenant.auth0.com/"`
+- **Okta**: `issuer: "https://your-org.okta.com"`
+- **Keycloak**: `issuer: "https://keycloak.example.com/realms/your-realm"`
+- **Azure AD**: `issuer: "https://login.microsoftonline.com/your-tenant/v2.0"`
+
+## Client Configuration
+
+### Apache Spark
+
+```properties
+spark.sql.catalog.bingsan=org.apache.iceberg.spark.SparkCatalog
+spark.sql.catalog.bingsan.type=rest
+spark.sql.catalog.bingsan.uri=http://localhost:8181
+spark.sql.catalog.bingsan.credential=client_id:client_secret
+```
+
+### PyIceberg
+
+```python
+from pyiceberg.catalog import load_catalog
+
+catalog = load_catalog(
+ "rest",
+ uri="http://localhost:8181",
+ credential="client_id:client_secret"
+)
+```
+
+### Trino
+
+```properties
+connector.name=iceberg
+iceberg.catalog.type=rest
+iceberg.rest-catalog.uri=http://localhost:8181
+iceberg.rest-catalog.security=OAUTH2
+iceberg.rest-catalog.oauth2.client-id=client_id
+iceberg.rest-catalog.oauth2.client-secret=client_secret
+```
+
+## Endpoints Without Authentication
+
+These endpoints never require authentication:
+
+| Endpoint | Description |
+|----------|-------------|
+| `GET /health` | Health check |
+| `GET /ready` | Readiness check |
+| `GET /metrics` | Prometheus metrics |
+| `GET /v1/config` | Catalog configuration |
+| `POST /v1/oauth/tokens` | Token exchange |
diff --git a/docs-site/content/docs/en/configuration/database.mdx b/docs-site/content/docs/en/configuration/database.mdx
new file mode 100644
index 0000000..1400ae9
--- /dev/null
+++ b/docs-site/content/docs/en/configuration/database.mdx
@@ -0,0 +1,126 @@
+---
+title: Database Configuration
+description: Configure PostgreSQL connection for metadata storage
+---
+
+# Database Configuration
+
+Configure the PostgreSQL connection for metadata storage.
+
+## Options
+
+```yaml
+database:
+ host: localhost
+ port: 5432
+ user: iceberg
+ password: iceberg
+ database: iceberg_catalog
+ ssl_mode: disable
+ max_open_conns: 25
+ max_idle_conns: 5
+ conn_max_lifetime: 5m
+ conn_max_idle_time: 5m
+```
+
+## Reference
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `host` | string | `localhost` | PostgreSQL server hostname |
+| `port` | integer | `5432` | PostgreSQL server port |
+| `user` | string | `iceberg` | Database user |
+| `password` | string | `iceberg` | Database password |
+| `database` | string | `iceberg_catalog` | Database name |
+| `ssl_mode` | string | `disable` | SSL mode (`disable`, `require`, `verify-ca`, `verify-full`) |
+| `max_open_conns` | integer | `25` | Maximum open connections |
+| `max_idle_conns` | integer | `5` | Maximum idle connections |
+| `conn_max_lifetime` | duration | `5m` | Maximum connection lifetime |
+| `conn_max_idle_time` | duration | `5m` | Maximum connection idle time |
+
+## Environment Variables
+
+```bash
+ICEBERG_DATABASE_HOST=localhost
+ICEBERG_DATABASE_PORT=5432
+ICEBERG_DATABASE_USER=iceberg
+ICEBERG_DATABASE_PASSWORD=iceberg
+ICEBERG_DATABASE_DATABASE=iceberg_catalog
+ICEBERG_DATABASE_SSL_MODE=disable
+ICEBERG_DATABASE_MAX_OPEN_CONNS=25
+```
+
+## SSL Modes
+
+| Mode | Description |
+|------|-------------|
+| `disable` | No SSL (development only) |
+| `require` | Use SSL but don't verify certificate |
+| `verify-ca` | Verify server certificate against CA |
+| `verify-full` | Verify certificate and hostname |
+
+### Production SSL Configuration
+
+```yaml
+database:
+ host: postgres.example.com
+ ssl_mode: verify-full
+```
+
+## Connection Pool Tuning
+
+### Default Settings
+
+Suitable for most workloads:
+
+```yaml
+database:
+ max_open_conns: 25
+ max_idle_conns: 5
+ conn_max_lifetime: 5m
+ conn_max_idle_time: 5m
+```
+
+### High-Throughput Settings
+
+For high request rates:
+
+```yaml
+database:
+ max_open_conns: 100
+ max_idle_conns: 25
+ conn_max_lifetime: 30m
+ conn_max_idle_time: 10m
+```
+
+## Database Setup
+
+### Create Database
+
+```sql
+CREATE DATABASE iceberg_catalog;
+CREATE USER iceberg WITH PASSWORD 'your-secure-password';
+GRANT ALL PRIVILEGES ON DATABASE iceberg_catalog TO iceberg;
+```
+
+### Migrations
+
+Bingsan automatically runs database migrations on startup. No manual schema setup is required.
+
+## Troubleshooting
+
+### Connection Refused
+
+- Verify PostgreSQL is running
+- Check host and port settings
+- Ensure network connectivity
+
+### Authentication Failed
+
+- Verify user and password
+- Check PostgreSQL's `pg_hba.conf` for allowed connections
+
+### Too Many Connections
+
+- Reduce `max_open_conns`
+- Increase PostgreSQL's `max_connections`
diff --git a/docs-site/content/docs/configuration/_index.md b/docs-site/content/docs/en/configuration/index.mdx
similarity index 76%
rename from docs-site/content/docs/configuration/_index.md
rename to docs-site/content/docs/en/configuration/index.mdx
index 7479190..652bfe8 100644
--- a/docs-site/content/docs/configuration/_index.md
+++ b/docs-site/content/docs/en/configuration/index.mdx
@@ -1,7 +1,6 @@
---
-title: "Configuration"
-weight: 3
-bookCollapseSection: true
+title: Configuration
+description: Complete configuration guide for Bingsan
---
# Configuration
@@ -83,12 +82,10 @@ catalog:
## Configuration Sections
-- [Server]({{< relref "/docs/configuration/server" >}}) - HTTP server settings
-- [Database]({{< relref "/docs/configuration/database" >}}) - PostgreSQL connection
-- [Storage]({{< relref "/docs/configuration/storage" >}}) - S3/GCS/Local storage backends
-- [Authentication]({{< relref "/docs/configuration/auth" >}}) - OAuth2 and API key authentication
-- [Catalog]({{< relref "/docs/configuration/catalog" >}}) - Catalog behavior settings
-- [Monitoring]({{< relref "/docs/configuration/monitoring" >}}) - Metrics and observability
+- [Server](/docs/configuration/server) - HTTP server settings
+- [Database](/docs/configuration/database) - PostgreSQL connection
+- [Storage](/docs/configuration/storage) - S3/GCS/Local storage backends
+- [Authentication](/docs/configuration/auth) - OAuth2 and API key authentication
## Environment Variables
diff --git a/docs-site/content/docs/en/configuration/meta.json b/docs-site/content/docs/en/configuration/meta.json
new file mode 100644
index 0000000..1ee7b56
--- /dev/null
+++ b/docs-site/content/docs/en/configuration/meta.json
@@ -0,0 +1,10 @@
+{
+ "title": "Configuration",
+ "pages": [
+ "index",
+ "server",
+ "database",
+ "storage",
+ "auth"
+ ]
+}
diff --git a/docs-site/content/docs/configuration/server.md b/docs-site/content/docs/en/configuration/server.mdx
similarity index 65%
rename from docs-site/content/docs/configuration/server.md
rename to docs-site/content/docs/en/configuration/server.mdx
index 7232b68..ed0fd1f 100644
--- a/docs-site/content/docs/configuration/server.md
+++ b/docs-site/content/docs/en/configuration/server.mdx
@@ -1,6 +1,6 @@
---
-title: "Server"
-weight: 1
+title: Server Configuration
+description: Configure HTTP server settings for Bingsan
---
# Server Configuration
@@ -63,15 +63,6 @@ server:
host: 127.0.0.1
```
-### Specific Interface
-
-Bind to a specific network interface:
-
-```yaml
-server:
- host: 192.168.1.100
-```
-
## Timeouts
### Read Timeout
@@ -99,22 +90,6 @@ server:
write_timeout: 30s
```
-Increase for large responses:
-
-```yaml
-server:
- write_timeout: 5m # For large table metadata
-```
-
-### Idle Timeout
-
-Maximum time to wait for the next request when keep-alives are enabled.
-
-```yaml
-server:
- idle_timeout: 120s
-```
-
## Debug Mode
Enable debug mode for troubleshooting:
@@ -129,34 +104,9 @@ In debug mode:
- More verbose logging is enabled
- Performance may be reduced
-> [!WARNING]
-> Do not enable debug mode in production.
-
-## Port Selection
-
-### Standard Port
-
-The Iceberg REST Catalog standard port is `8181`:
-
-```yaml
-server:
- port: 8181
-```
-
-### Alternative Ports
-
-Choose an alternative port to avoid conflicts:
-
-```yaml
-server:
- port: 8080 # Common HTTP alternative
- # or
- port: 9181 # Shifted port
-```
-
-### Privileged Ports
-
-To use ports below 1024 (like 80 or 443), the process must run as root or have the `CAP_NET_BIND_SERVICE` capability.
+
+Do not enable debug mode in production.
+
## Production Recommendations
@@ -169,15 +119,3 @@ server:
write_timeout: 60s
idle_timeout: 300s
```
-
-For high-throughput environments:
-
-```yaml
-server:
- host: 0.0.0.0
- port: 8181
- debug: false
- read_timeout: 30s
- write_timeout: 30s
- idle_timeout: 60s # Lower to free connections faster
-```
diff --git a/docs-site/content/docs/en/configuration/storage.mdx b/docs-site/content/docs/en/configuration/storage.mdx
new file mode 100644
index 0000000..89357ac
--- /dev/null
+++ b/docs-site/content/docs/en/configuration/storage.mdx
@@ -0,0 +1,135 @@
+---
+title: Storage Configuration
+description: Configure storage backends for Iceberg data files
+---
+
+# Storage Configuration
+
+Configure the storage backend for Iceberg data files.
+
+## Overview
+
+Bingsan supports three storage backends:
+
+- **S3** - Amazon S3 and compatible services (MinIO, R2)
+- **GCS** - Google Cloud Storage
+- **Local** - Local filesystem (development only)
+
+## Options
+
+```yaml
+storage:
+ type: s3
+ warehouse: s3://bucket/warehouse
+
+ s3:
+ endpoint: ""
+ region: us-east-1
+ access_key_id: ""
+ secret_access_key: ""
+ bucket: warehouse
+ use_path_style: false
+
+ gcs:
+ project: ""
+ credentials_file: ""
+ bucket: ""
+
+ local:
+ root_path: /tmp/iceberg/data
+```
+
+## S3 Options
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `endpoint` | string | `""` | Custom endpoint (for MinIO, R2) |
+| `region` | string | `us-east-1` | AWS region |
+| `access_key_id` | string | `""` | AWS access key |
+| `secret_access_key` | string | `""` | AWS secret key |
+| `bucket` | string | `warehouse` | S3 bucket name |
+| `use_path_style` | boolean | `false` | Use path-style URLs |
+
+## Amazon S3
+
+### With IAM Role (Recommended)
+
+When running on EC2, ECS, EKS, or Lambda:
+
+```yaml
+storage:
+ type: s3
+ warehouse: s3://my-bucket/warehouse
+
+ s3:
+ region: us-east-1
+ bucket: my-bucket
+ # No credentials needed - uses instance role
+```
+
+### Required IAM Permissions
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:GetObject",
+ "s3:PutObject",
+ "s3:DeleteObject",
+ "s3:ListBucket"
+ ],
+ "Resource": [
+ "arn:aws:s3:::my-bucket",
+ "arn:aws:s3:::my-bucket/*"
+ ]
+ }
+ ]
+}
+```
+
+## MinIO
+
+```yaml
+storage:
+ type: s3
+ warehouse: s3://warehouse/data
+
+ s3:
+ endpoint: "http://minio:9000"
+ region: us-east-1
+ access_key_id: "minioadmin"
+ secret_access_key: "minioadmin"
+ bucket: warehouse
+ use_path_style: true
+```
+
+## Google Cloud Storage
+
+```yaml
+storage:
+ type: gcs
+ warehouse: gs://my-bucket/warehouse
+
+ gcs:
+ project: my-gcp-project
+ bucket: my-bucket
+ credentials_file: /path/to/service-account.json
+```
+
+## Local Storage
+
+
+Local storage is intended for development and testing only.
+
+
+```yaml
+storage:
+ type: local
+ warehouse: file:///data/warehouse
+
+ local:
+ root_path: /data/warehouse
+```
diff --git a/docs-site/content/docs/en/contributing/code-style.mdx b/docs-site/content/docs/en/contributing/code-style.mdx
new file mode 100644
index 0000000..ae8098c
--- /dev/null
+++ b/docs-site/content/docs/en/contributing/code-style.mdx
@@ -0,0 +1,155 @@
+---
+title: Code Style
+description: Code style guidelines and linting
+---
+
+# Code Style
+
+Bingsan follows Go best practices with additional project-specific conventions.
+
+## Formatting
+
+The project uses **gofumpt** (a stricter gofmt):
+
+```bash
+# Install
+go install mvdan.cc/gofumpt@latest
+
+# Format
+gofumpt -w .
+```
+
+## Linting
+
+golangci-lint is configured in `.golangci.yml`:
+
+```bash
+# Run linter
+make lint
+
+# Auto-fix issues
+make lint-fix
+```
+
+### Enabled Linters
+
+Key linters include:
+- `gofumpt` - Stricter formatting
+- `gosec` - Security checks
+- `gocritic` - Code quality
+- `revive` - Style checks
+- `errcheck` - Error handling
+- `staticcheck` - Static analysis
+
+## Import Organization
+
+Imports should be grouped in this order:
+
+```go
+import (
+ // Standard library
+ "context"
+ "fmt"
+
+ // Third-party packages
+ "github.com/gofiber/fiber/v2"
+ "github.com/jackc/pgx/v5"
+
+ // Local packages
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/db"
+)
+```
+
+## Naming Conventions
+
+### Files
+
+- Lowercase with underscores: `table_handler.go`
+- Test files: `table_handler_test.go`
+
+### Functions
+
+- Exported: `PascalCase` - `CreateTable`
+- Unexported: `camelCase` - `validateSchema`
+
+### Variables
+
+- Short-lived: `t`, `ctx`, `err`
+- Descriptive for complex types: `tableMetadata`, `namespaceList`
+
+## Error Handling
+
+Always handle errors explicitly:
+
+```go
+// Good
+result, err := doSomething()
+if err != nil {
+ return fmt.Errorf("failed to do something: %w", err)
+}
+
+// Bad - ignoring error
+result, _ := doSomething()
+```
+
+## JSON Serialization
+
+Use `goccy/go-json` instead of `encoding/json`:
+
+```go
+import "github.com/goccy/go-json"
+
+// Marshal
+data, err := json.Marshal(obj)
+
+// Unmarshal
+err := json.Unmarshal(data, &obj)
+```
+
+## Context Usage
+
+Always accept context as first parameter:
+
+```go
+func (h *Handler) GetTable(ctx context.Context, name string) (*Table, error) {
+ // Use ctx for cancellation, deadlines, and tracing
+}
+```
+
+## Complexity Limits
+
+- Cyclomatic complexity: max 15
+- Function length: aim for under 50 lines
+- File length: aim for under 500 lines
+
+## Documentation
+
+### Package Comments
+
+```go
+// Package handlers provides HTTP handlers for the Iceberg REST API.
+// It implements the Apache Iceberg REST Catalog specification.
+package handlers
+```
+
+### Function Comments
+
+```go
+// CreateTable creates a new table in the specified namespace.
+// It validates the table schema and metadata before persisting.
+// Returns the created table metadata or an error.
+func (h *Handler) CreateTable(ctx context.Context, req CreateTableRequest) (*Table, error) {
+```
+
+## Commit Messages
+
+Follow conventional commits:
+
+```text
+feat: add table compaction endpoint
+fix: correct metadata serialization for partitions
+docs: update API reference for views
+test: add integration tests for namespace operations
+refactor: extract validation logic to separate package
+```
diff --git a/docs-site/content/docs/en/contributing/development.mdx b/docs-site/content/docs/en/contributing/development.mdx
new file mode 100644
index 0000000..106cd55
--- /dev/null
+++ b/docs-site/content/docs/en/contributing/development.mdx
@@ -0,0 +1,138 @@
+---
+title: Development
+description: Setting up the development environment
+---
+
+# Development Environment
+
+Set up your local development environment for Bingsan.
+
+## Prerequisites
+
+- Go 1.23+
+- Docker and Docker Compose
+- Make
+- PostgreSQL client (psql) - optional
+
+## Quick Setup
+
+```bash
+# Clone and enter directory
+git clone https://github.com/teamPaprika/bingsan.git
+cd bingsan
+
+# Install Go dependencies
+go mod download
+
+# Install development tools
+make install-tools
+
+# Copy configuration
+cp config.example.yaml config.yaml
+
+# Start PostgreSQL
+make docker-up
+
+# Run with hot reload
+make dev
+```
+
+## Build Commands
+
+```bash
+# Build binary
+make build
+
+# Run without hot reload
+make run
+
+# Clean build artifacts
+make clean
+```
+
+## Database Management
+
+```bash
+# Apply all migrations
+make migrate-up
+
+# Rollback one migration
+make migrate-down
+
+# Create new migration
+make migrate-create name=add_feature_table
+```
+
+## Hot Reload
+
+The project uses [air](https://github.com/cosmtrek/air) for hot reload during development:
+
+```bash
+make dev
+```
+
+Air watches for file changes and automatically rebuilds and restarts the server.
+
+## IDE Setup
+
+### VS Code
+
+Recommended extensions:
+- Go (golang.go)
+- EditorConfig
+- YAML
+
+Settings:
+```json
+{
+ "go.lintTool": "golangci-lint",
+ "go.lintFlags": ["--fast"],
+ "editor.formatOnSave": true
+}
+```
+
+### GoLand
+
+- Enable Go Modules integration
+- Configure golangci-lint as external tool
+- Set gofumpt as the formatter
+
+## Environment Variables
+
+For development, environment variables can override config.yaml:
+
+```bash
+export ICEBERG_SERVER_PORT=8181
+export ICEBERG_DATABASE_HOST=localhost
+export ICEBERG_DEBUG=true
+```
+
+## Debugging
+
+### Delve Debugger
+
+```bash
+# Install delve
+go install github.com/go-delve/delve/cmd/dlv@latest
+
+# Debug
+dlv debug ./cmd/iceberg-catalog
+```
+
+### VS Code Launch Configuration
+
+```json
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Launch Bingsan",
+ "type": "go",
+ "request": "launch",
+ "mode": "auto",
+ "program": "${workspaceFolder}/cmd/iceberg-catalog",
+ "args": ["-config", "config.yaml"]
+ }
+ ]
+}
+```
diff --git a/docs-site/content/docs/en/contributing/index.mdx b/docs-site/content/docs/en/contributing/index.mdx
new file mode 100644
index 0000000..48fd639
--- /dev/null
+++ b/docs-site/content/docs/en/contributing/index.mdx
@@ -0,0 +1,93 @@
+---
+title: Contributing
+description: How to contribute to Bingsan
+---
+
+# Contributing to Bingsan
+
+Thank you for your interest in contributing to Bingsan! This guide will help you get started.
+
+## Getting Started
+
+1. Fork the repository on GitHub
+2. Clone your fork locally
+3. Set up the development environment
+4. Create a feature branch
+5. Make your changes
+6. Submit a pull request
+
+## Development Setup
+
+```bash
+# Clone the repository
+git clone https://github.com/YOUR_USERNAME/bingsan.git
+cd bingsan
+
+# Install dependencies
+go mod download
+
+# Install development tools
+make install-tools
+
+# Copy configuration
+cp config.example.yaml config.yaml
+
+# Start dependencies
+make docker-up
+
+# Run the server
+make dev
+```
+
+## Project Structure
+
+```text
+bingsan/
+├── cmd/iceberg-catalog/ # Application entry point
+├── internal/
+│ ├── api/ # HTTP handlers and middleware
+│ ├── db/ # Database layer and migrations
+│ ├── events/ # Event streaming
+│ ├── pool/ # Object pooling
+│ ├── config/ # Configuration
+│ └── metrics/ # Prometheus metrics
+├── tests/ # Test suites
+├── deployments/ # Docker and K8s configs
+└── docs/ # Documentation
+```
+
+## Contribution Types
+
+### Bug Reports
+
+- Search existing issues first
+- Include reproduction steps
+- Provide environment details
+- Attach relevant logs
+
+### Feature Requests
+
+- Check the roadmap and existing issues
+- Describe the use case
+- Propose a solution if possible
+
+### Code Contributions
+
+- Follow the [code style guide](/docs/contributing/code-style)
+- Write tests for new functionality
+- Update documentation as needed
+- Keep commits focused and well-described
+
+## Pull Request Process
+
+1. Update the README or docs if needed
+2. Ensure all tests pass
+3. Update the CHANGELOG if applicable
+4. Request review from maintainers
+5. Address review feedback promptly
+
+## Communication
+
+- GitHub Issues for bugs and features
+- GitHub Discussions for questions
+- Pull Request comments for code review
diff --git a/docs-site/content/docs/en/contributing/meta.json b/docs-site/content/docs/en/contributing/meta.json
new file mode 100644
index 0000000..6a43dc8
--- /dev/null
+++ b/docs-site/content/docs/en/contributing/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "Contributing",
+ "pages": ["index", "development", "testing", "code-style"]
+}
diff --git a/docs-site/content/docs/en/contributing/testing.mdx b/docs-site/content/docs/en/contributing/testing.mdx
new file mode 100644
index 0000000..a74cd77
--- /dev/null
+++ b/docs-site/content/docs/en/contributing/testing.mdx
@@ -0,0 +1,160 @@
+---
+title: Testing
+description: Running and writing tests
+---
+
+# Testing
+
+Bingsan has a comprehensive test suite including unit, integration, and benchmark tests.
+
+## Running Tests
+
+```bash
+# Run all tests
+make test
+
+# Run with verbose output
+go test -v ./...
+
+# Run specific package
+go test -v ./internal/api/handlers/...
+
+# Run specific test
+go test -v -run TestTableCreate ./tests/unit/...
+```
+
+## Test Structure
+
+```text
+tests/
+├── unit/ # Unit tests
+├── integration/ # Integration tests (require database)
+├── contract/ # API contract tests
+├── e2e/ # End-to-end tests
+├── benchmark/ # Performance benchmarks
+└── fixtures/ # Test data and SQL fixtures
+```
+
+## Unit Tests
+
+Unit tests are located alongside the code or in `tests/unit/`:
+
+```bash
+# Run unit tests only
+go test -v ./internal/...
+```
+
+## Integration Tests
+
+Integration tests require a running PostgreSQL instance:
+
+```bash
+# Start dependencies
+make docker-up
+
+# Run integration tests
+make test-integration
+```
+
+Integration tests use the `integration` build tag:
+
+```go
+//go:build integration
+
+package integration
+
+func TestDatabaseOperations(t *testing.T) {
+ // ...
+}
+```
+
+## Contract Tests
+
+Contract tests verify API compliance with the Iceberg REST spec:
+
+```bash
+go test -v ./tests/contract/...
+```
+
+## Writing Tests
+
+### Test Naming
+
+```go
+func TestFeature_Scenario_ExpectedBehavior(t *testing.T) {
+ // Example: TestTableCreate_WithValidInput_ReturnsCreatedTable
+}
+```
+
+### Table-Driven Tests
+
+```go
+func TestValidation(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ wantErr bool
+ }{
+ {"valid input", "test", false},
+ {"empty input", "", true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := Validate(tt.input)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+```
+
+### Test Fixtures
+
+Use SQL fixtures for database tests:
+
+```text
+tests/fixtures/
+├── namespaces.sql
+├── tables.sql
+└── cleanup.sql
+```
+
+## Benchmarks
+
+```bash
+# Run all benchmarks
+make bench
+
+# Run specific benchmark
+go test -bench=BenchmarkTable -benchmem ./tests/benchmark/...
+
+# Compare before/after
+go test -bench=. ./tests/benchmark/... | tee before.txt
+# Make changes
+go test -bench=. ./tests/benchmark/... | tee after.txt
+benchstat before.txt after.txt
+```
+
+## Coverage
+
+```bash
+# Generate coverage report
+go test -coverprofile=coverage.out ./...
+
+# View in browser
+go tool cover -html=coverage.out
+
+# Check coverage percentage
+go tool cover -func=coverage.out
+```
+
+## CI Integration
+
+Tests run automatically on pull requests via GitHub Actions. The CI pipeline:
+
+1. Runs linters (golangci-lint)
+2. Runs unit tests
+3. Runs integration tests with PostgreSQL service
+4. Reports coverage
diff --git a/docs-site/content/docs/en/deployment/docker.mdx b/docs-site/content/docs/en/deployment/docker.mdx
new file mode 100644
index 0000000..c9b33be
--- /dev/null
+++ b/docs-site/content/docs/en/deployment/docker.mdx
@@ -0,0 +1,155 @@
+---
+title: Docker
+description: Deploy Bingsan using Docker or Docker Compose
+---
+
+# Docker Deployment
+
+Deploy Bingsan using Docker or Docker Compose.
+
+## Docker Compose (Development)
+
+The fastest way to get started for development and testing.
+
+### Prerequisites
+
+- Docker Engine 20.10+
+- Docker Compose v2.0+
+
+### Quick Start
+
+```bash
+# Clone the repository
+git clone https://github.com/teamPaprika/bingsan.git
+cd bingsan
+
+# Copy configuration
+cp config.example.yaml config.yaml
+
+# Start services
+docker compose -f deployments/docker/docker-compose.yml up -d
+```
+
+### docker-compose.yml
+
+```yaml
+version: '3.8'
+
+services:
+ bingsan:
+    image: ghcr.io/teampaprika/bingsan:latest
+ ports:
+ - "8181:8181"
+ environment:
+ - ICEBERG_SERVER_HOST=0.0.0.0
+ - ICEBERG_SERVER_PORT=8181
+ - ICEBERG_DATABASE_HOST=postgres
+ - ICEBERG_DATABASE_PORT=5432
+ - ICEBERG_DATABASE_USER=iceberg
+ - ICEBERG_DATABASE_PASSWORD=iceberg
+ - ICEBERG_DATABASE_DATABASE=iceberg_catalog
+ - ICEBERG_STORAGE_TYPE=local
+ - ICEBERG_STORAGE_WAREHOUSE=file:///data/warehouse
+ volumes:
+ - warehouse-data:/data/warehouse
+ depends_on:
+ postgres:
+ condition: service_healthy
+
+ postgres:
+ image: postgres:15-alpine
+ environment:
+ - POSTGRES_USER=iceberg
+ - POSTGRES_PASSWORD=iceberg
+ - POSTGRES_DB=iceberg_catalog
+ ports:
+ - "5432:5432"
+ volumes:
+ - postgres-data:/var/lib/postgresql/data
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U iceberg -d iceberg_catalog"]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+volumes:
+ warehouse-data:
+ postgres-data:
+```
+
+### Managing the Stack
+
+```bash
+# View logs
+docker compose -f deployments/docker/docker-compose.yml logs -f
+
+# Stop services
+docker compose -f deployments/docker/docker-compose.yml down
+
+# Stop and remove volumes
+docker compose -f deployments/docker/docker-compose.yml down -v
+```
+
+## Docker Compose with S3 (MinIO)
+
+For testing with S3-compatible storage, add a MinIO service to your compose file and point Bingsan's S3 storage settings at it; a minimal sketch follows.
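+
+The fragment below is illustrative: the service name, bucket, and credentials are placeholders, and the `ICEBERG_STORAGE_S3_*` variable names other than `REGION` follow the documented `ICEBERG_*` mapping pattern rather than being copied from the repository.
+
+```yaml
+  minio:
+    image: minio/minio:latest
+    command: server /data --console-address ":9001"
+    environment:
+      - MINIO_ROOT_USER=minioadmin
+      - MINIO_ROOT_PASSWORD=minioadmin
+    ports:
+      - "9000:9000"
+      - "9001:9001"
+
+  # On the bingsan service, point storage at MinIO:
+  # - ICEBERG_STORAGE_TYPE=s3
+  # - ICEBERG_STORAGE_WAREHOUSE=s3://warehouse/data
+  # - ICEBERG_STORAGE_S3_ENDPOINT=http://minio:9000
+  # - ICEBERG_STORAGE_S3_ACCESS_KEY_ID=minioadmin
+  # - ICEBERG_STORAGE_S3_SECRET_ACCESS_KEY=minioadmin
+  # - ICEBERG_STORAGE_S3_BUCKET=warehouse
+  # - ICEBERG_STORAGE_S3_USE_PATH_STYLE=true
+```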
+
+## Standalone Docker
+
+Run Bingsan as a standalone container (requires external PostgreSQL).
+
+```bash
+# Pull the image
+docker pull ghcr.io/teampaprika/bingsan:latest
+
+# Run with environment variables
+docker run -d \
+ --name bingsan \
+ -p 8181:8181 \
+ -e ICEBERG_DATABASE_HOST=your-postgres-host \
+ -e ICEBERG_DATABASE_PORT=5432 \
+ -e ICEBERG_DATABASE_USER=iceberg \
+ -e ICEBERG_DATABASE_PASSWORD=your-password \
+ -e ICEBERG_DATABASE_DATABASE=iceberg_catalog \
+ -e ICEBERG_STORAGE_TYPE=s3 \
+ -e ICEBERG_STORAGE_WAREHOUSE=s3://your-bucket/warehouse \
+ -e ICEBERG_STORAGE_S3_REGION=us-east-1 \
+  ghcr.io/teampaprika/bingsan:latest
+```
+
+## Production Recommendations
+
+1. **Use specific image tags**: Don't use `latest` in production
+2. **Resource limits**: Set memory and CPU limits
+3. **Health checks**: Configure container health checks
+4. **Logging**: Use json-file or external logging driver
+5. **Secrets**: Use Docker secrets or external secret management
+
+## Troubleshooting
+
+### Container Won't Start
+
+```bash
+# Check logs
+docker logs bingsan
+
+# Check if port is in use
+lsof -i :8181
+```
+
+### Database Connection Failed
+
+```bash
+# Test PostgreSQL connectivity
+docker exec bingsan nc -zv postgres 5432
+
+# Check environment variables
+docker exec bingsan env | grep ICEBERG_DATABASE
+```
+
+### Health Check Failing
+
+```bash
+# Test health endpoint
+docker exec bingsan wget -q -O- http://localhost:8181/health
+```
diff --git a/docs-site/content/docs/deployment/_index.md b/docs-site/content/docs/en/deployment/index.mdx
similarity index 72%
rename from docs-site/content/docs/deployment/_index.md
rename to docs-site/content/docs/en/deployment/index.mdx
index 06d99f3..4a2f66c 100644
--- a/docs-site/content/docs/deployment/_index.md
+++ b/docs-site/content/docs/en/deployment/index.mdx
@@ -1,7 +1,6 @@
---
-title: "Deployment"
-weight: 5
-bookCollapseSection: true
+title: Deployment
+description: Deploy Bingsan in various environments
---
# Deployment
@@ -12,12 +11,12 @@ This section covers deploying Bingsan in various environments.
### Development
-- [Docker Compose]({{< relref "/docs/deployment/docker" >}}) - Quick local setup
+- [Docker Compose](/docs/deployment/docker) - Quick local setup
### Production
-- [Kubernetes]({{< relref "/docs/deployment/kubernetes" >}}) - Scalable cloud deployment
-- [Docker]({{< relref "/docs/deployment/docker" >}}) - Single-node production
+- [Kubernetes](/docs/deployment/kubernetes) - Scalable cloud deployment
+- [Docker](/docs/deployment/docker) - Single-node production
## Quick Comparison
@@ -55,4 +54,4 @@ Regardless of deployment method, you'll need to configure:
2. **Storage**: S3/GCS credentials and bucket
3. **Auth** (optional): OAuth/API key settings
-See [Configuration]({{< relref "/docs/configuration" >}}) for all options.
+See [Configuration](/docs/configuration) for all options.
diff --git a/docs-site/content/docs/en/deployment/kubernetes.mdx b/docs-site/content/docs/en/deployment/kubernetes.mdx
new file mode 100644
index 0000000..096bf31
--- /dev/null
+++ b/docs-site/content/docs/en/deployment/kubernetes.mdx
@@ -0,0 +1,225 @@
+---
+title: Kubernetes
+description: Deploy Bingsan on Kubernetes for production
+---
+
+# Kubernetes Deployment
+
+Deploy Bingsan on Kubernetes for production workloads.
+
+## Prerequisites
+
+- Kubernetes 1.24+
+- kubectl configured
+- PostgreSQL database (managed or self-hosted)
+- Object storage (S3/GCS)
+
+## Quick Start
+
+### 1. Create Namespace
+
+```bash
+kubectl create namespace bingsan
+```
+
+### 2. Create Secrets
+
+```bash
+# Database credentials
+kubectl create secret generic bingsan-db \
+ --namespace bingsan \
+ --from-literal=host=postgres.example.com \
+ --from-literal=port=5432 \
+ --from-literal=user=iceberg \
+ --from-literal=password=your-password \
+ --from-literal=database=iceberg_catalog
+
+# S3 credentials (if using static credentials)
+kubectl create secret generic bingsan-s3 \
+ --namespace bingsan \
+ --from-literal=access-key-id=AKIA... \
+ --from-literal=secret-access-key=...
+```
+
+### 3. Create ConfigMap
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: bingsan-config
+ namespace: bingsan
+data:
+ config.yaml: |
+ server:
+ host: 0.0.0.0
+ port: 8181
+ debug: false
+
+ storage:
+ type: s3
+ warehouse: s3://your-bucket/warehouse
+ s3:
+ region: us-east-1
+ bucket: your-bucket
+
+ catalog:
+ lock_timeout: 30s
+```
+
+### 4. Deploy
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: bingsan
+ template:
+ metadata:
+ labels:
+ app: bingsan
+ spec:
+ containers:
+ - name: bingsan
+          image: ghcr.io/teampaprika/bingsan:latest
+ ports:
+ - containerPort: 8181
+ name: http
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "1000m"
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: http
+            initialDelaySeconds: 10
+            periodSeconds: 10
+          readinessProbe:
+            httpGet:
+              path: /ready
+              port: http
+            initialDelaySeconds: 5
+            periodSeconds: 5
+```
+
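+The manifests above create a secret and a ConfigMap, but the Deployment does not yet reference them. One way to wire them in is sketched below; the mount path, the `-config` argument usage, and injecting only two of the `ICEBERG_DATABASE_*` variables are assumptions for illustration, not taken from the repository's manifests.
+
+```yaml
+          # Add to the bingsan container spec:
+          args: ["-config", "/etc/bingsan/config.yaml"]  # config path is an assumption
+          env:
+            - name: ICEBERG_DATABASE_HOST
+              valueFrom:
+                secretKeyRef:
+                  name: bingsan-db
+                  key: host
+            - name: ICEBERG_DATABASE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: bingsan-db
+                  key: password
+            # Remaining ICEBERG_DATABASE_* variables follow the same pattern.
+          volumeMounts:
+            - name: config
+              mountPath: /etc/bingsan
+      # And at the pod spec level:
+      volumes:
+        - name: config
+          configMap:
+            name: bingsan-config
+```
+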
+## Service
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ type: ClusterIP
+ selector:
+ app: bingsan
+ ports:
+ - port: 8181
+ targetPort: http
+ name: http
+```
+
+## HorizontalPodAutoscaler
+
+```yaml
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: bingsan
+ minReplicas: 3
+ maxReplicas: 20
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 70
+```
+
+## PodDisruptionBudget
+
+```yaml
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ minAvailable: 2
+ selector:
+ matchLabels:
+ app: bingsan
+```
+
+## Cloud Provider Integration
+
+### AWS IAM Roles for Service Accounts (IRSA)
+
+For AWS S3 access without static credentials, use IAM roles for service accounts.
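+
+A sketch of the annotated service account (the account ID and role ARN are placeholders; the cluster must already have an IAM OIDC provider configured):
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: bingsan
+  namespace: bingsan
+  annotations:
+    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/bingsan-s3-access
+```
+
+Reference it from the Deployment with `serviceAccountName: bingsan`.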
+
+### GCP Workload Identity
+
+For GCS access, configure Workload Identity on your GKE cluster.
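+
+The GKE equivalent is a Kubernetes service account annotated with the GCP service account it should impersonate (names below are placeholders):
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: bingsan
+  namespace: bingsan
+  annotations:
+    iam.gke.io/gcp-service-account: bingsan@my-gcp-project.iam.gserviceaccount.com
+```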
+
+## Monitoring
+
+### ServiceMonitor (Prometheus Operator)
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ selector:
+ matchLabels:
+ app: bingsan
+ endpoints:
+ - port: http
+ path: /metrics
+ interval: 15s
+```
+
+## Troubleshooting
+
+### Check Pod Status
+
+```bash
+kubectl get pods -n bingsan
+kubectl describe pod -n bingsan bingsan-xxx
+```
+
+### View Logs
+
+```bash
+kubectl logs -n bingsan -l app=bingsan --tail=100 -f
+```
+
+### Test Connectivity
+
+```bash
+# Port forward for local testing
+kubectl port-forward -n bingsan svc/bingsan 8181:8181
+
+# Test health
+curl http://localhost:8181/health
+```
diff --git a/docs-site/content/docs/en/deployment/meta.json b/docs-site/content/docs/en/deployment/meta.json
new file mode 100644
index 0000000..2add468
--- /dev/null
+++ b/docs-site/content/docs/en/deployment/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "Deployment",
+ "pages": ["index", "docker", "kubernetes"]
+}
diff --git a/docs-site/content/docs/getting-started/first-steps.md b/docs-site/content/docs/en/getting-started/first-steps.mdx
similarity index 93%
rename from docs-site/content/docs/getting-started/first-steps.md
rename to docs-site/content/docs/en/getting-started/first-steps.mdx
index 03826a4..c237e4d 100644
--- a/docs-site/content/docs/getting-started/first-steps.md
+++ b/docs-site/content/docs/en/getting-started/first-steps.mdx
@@ -1,6 +1,6 @@
---
-title: "First Steps"
-weight: 3
+title: First Steps
+description: Create your first namespace and table in Bingsan
---
# First Steps
@@ -194,8 +194,9 @@ curl -X DELETE http://localhost:8181/v1/namespaces/analytics/tables/user_events
### Delete Namespace
-> [!WARNING]
-> Namespaces must be empty before deletion.
+
+Namespaces must be empty before deletion.
+
```bash
curl -X DELETE http://localhost:8181/v1/namespaces/analytics
@@ -224,6 +225,6 @@ spark.sql("SELECT * FROM bingsan.analytics.user_events LIMIT 10").show()
## Next Steps
-- [Explore all API endpoints]({{< relref "/docs/api" >}})
-- [Configure authentication]({{< relref "/docs/configuration/auth" >}})
-- [Set up monitoring]({{< relref "/docs/configuration/monitoring" >}})
+- [Explore all API endpoints](/docs/api)
+- [Configure authentication](/docs/configuration/auth)
+- [Set up monitoring](/docs/configuration/monitoring)
diff --git a/docs-site/content/docs/en/getting-started/index.mdx b/docs-site/content/docs/en/getting-started/index.mdx
new file mode 100644
index 0000000..280f61d
--- /dev/null
+++ b/docs-site/content/docs/en/getting-started/index.mdx
@@ -0,0 +1,22 @@
+---
+title: Getting Started
+description: Get started with Bingsan, the high-performance Apache Iceberg REST Catalog
+---
+
+# Getting Started
+
+This section covers everything you need to get Bingsan up and running.
+
+## Prerequisites
+
+Before installing Bingsan, ensure you have the following:
+
+- **Go 1.25+** (for building from source)
+- **PostgreSQL 15+** (metadata storage)
+- **Docker & Docker Compose** (recommended for development)
+
+## Sections
+
+- [Quick Start](/docs/getting-started/quick-start) - Get running in 5 minutes
+- [Installation](/docs/getting-started/installation) - Detailed installation options
+- [First Steps](/docs/getting-started/first-steps) - Create your first namespace and table
diff --git a/docs-site/content/docs/getting-started/installation.md b/docs-site/content/docs/en/getting-started/installation.mdx
similarity index 90%
rename from docs-site/content/docs/getting-started/installation.md
rename to docs-site/content/docs/en/getting-started/installation.mdx
index 05f4607..609d4fa 100644
--- a/docs-site/content/docs/getting-started/installation.md
+++ b/docs-site/content/docs/en/getting-started/installation.mdx
@@ -1,6 +1,6 @@
---
-title: "Installation"
-weight: 2
+title: Installation
+description: Complete guide to all Bingsan installation methods
---
# Installation
@@ -20,7 +20,7 @@ The easiest way to run Bingsan for development and testing.
```bash
# Clone the repository
-git clone https://github.com/kimuyb/bingsan.git
+git clone https://github.com/teamPaprika/bingsan.git
cd bingsan
# Copy configuration
@@ -43,7 +43,7 @@ Build and run Bingsan directly on your machine.
```bash
# Clone the repository
-git clone https://github.com/kimuyb/bingsan.git
+git clone https://github.com/teamPaprika/bingsan.git
cd bingsan
# Download dependencies
@@ -109,7 +109,7 @@ docker run -d \
Deploy to Kubernetes using Helm or raw manifests.
-See the [Kubernetes Deployment Guide]({{< relref "/docs/deployment/kubernetes" >}}) for detailed instructions.
+See the [Kubernetes Deployment Guide](/docs/deployment/kubernetes) for detailed instructions.
## Database Setup
@@ -157,4 +157,4 @@ All configuration options can be set via environment variables using the `ICEBER
| `ICEBERG_DATABASE_DATABASE` | `database.database` | Database name |
| `ICEBERG_AUTH_ENABLED` | `auth.enabled` | Enable authentication |
-See the [Configuration Guide]({{< relref "/docs/configuration" >}}) for all options.
+See the [Configuration Guide](/docs/configuration) for all options.
diff --git a/docs-site/content/docs/en/getting-started/meta.json b/docs-site/content/docs/en/getting-started/meta.json
new file mode 100644
index 0000000..d3ec6cd
--- /dev/null
+++ b/docs-site/content/docs/en/getting-started/meta.json
@@ -0,0 +1,9 @@
+{
+ "title": "Getting Started",
+ "pages": [
+ "index",
+ "quick-start",
+ "installation",
+ "first-steps"
+ ]
+}
diff --git a/docs-site/content/docs/getting-started/quick-start.md b/docs-site/content/docs/en/getting-started/quick-start.mdx
similarity index 80%
rename from docs-site/content/docs/getting-started/quick-start.md
rename to docs-site/content/docs/en/getting-started/quick-start.mdx
index 0db6115..e9933ac 100644
--- a/docs-site/content/docs/getting-started/quick-start.md
+++ b/docs-site/content/docs/en/getting-started/quick-start.mdx
@@ -1,6 +1,6 @@
---
-title: "Quick Start"
-weight: 1
+title: Quick Start
+description: Get Bingsan running in under 5 minutes using Docker Compose
---
# Quick Start
@@ -15,7 +15,7 @@ Get Bingsan running in under 5 minutes using Docker Compose.
## Step 1: Clone the Repository
```bash
-git clone https://github.com/kimuyb/bingsan.git
+git clone https://github.com/teamPaprika/bingsan.git
cd bingsan
```
@@ -27,7 +27,7 @@ Copy the example configuration file:
cp config.example.yaml config.yaml
```
-The default configuration works out of the box for local development. For production, see the [Configuration Guide]({{< relref "/docs/configuration" >}}).
+The default configuration works out of the box for local development. For production, see the [Configuration Guide](/docs/configuration).
## Step 3: Start with Docker Compose
@@ -93,9 +93,9 @@ Response:
## Next Steps
-- [Create your first table]({{< relref "/docs/getting-started/first-steps" >}})
-- [Learn about all API endpoints]({{< relref "/docs/api" >}})
-- [Configure for production]({{< relref "/docs/configuration" >}})
+- [Create your first table](/docs/getting-started/first-steps)
+- [Learn about all API endpoints](/docs/api)
+- [Configure for production](/docs/configuration)
## Stopping the Services
diff --git a/docs-site/content/docs/en/integrations/index.mdx b/docs-site/content/docs/en/integrations/index.mdx
new file mode 100644
index 0000000..3ae3eb3
--- /dev/null
+++ b/docs-site/content/docs/en/integrations/index.mdx
@@ -0,0 +1,23 @@
+---
+title: Integrations
+description: Connect Bingsan with Spark, Trino, PyIceberg, and more
+---
+
+# Integrations
+
+Bingsan integrates with all major Iceberg clients through the standard REST Catalog API.
+
+## Supported Clients
+
+- [Apache Spark](/docs/integrations/spark) - Batch and streaming data processing
+- [Trino](/docs/integrations/trino) - Distributed SQL query engine
+- [PyIceberg](/docs/integrations/pyiceberg) - Python library for Iceberg
+
+## Connection Overview
+
+All clients connect using the REST catalog type:
+
+```
+Catalog URI: http://localhost:8181
+Catalog Type: rest
+```
diff --git a/docs-site/content/docs/en/integrations/meta.json b/docs-site/content/docs/en/integrations/meta.json
new file mode 100644
index 0000000..6081d12
--- /dev/null
+++ b/docs-site/content/docs/en/integrations/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "Integrations",
+ "pages": ["index", "spark", "trino", "pyiceberg"]
+}
diff --git a/docs-site/content/docs/en/integrations/pyiceberg.mdx b/docs-site/content/docs/en/integrations/pyiceberg.mdx
new file mode 100644
index 0000000..09ef759
--- /dev/null
+++ b/docs-site/content/docs/en/integrations/pyiceberg.mdx
@@ -0,0 +1,51 @@
+---
+title: PyIceberg
+description: Use PyIceberg Python library with Bingsan
+---
+
+# PyIceberg Integration
+
+Use PyIceberg to interact with Bingsan from Python.
+
+## Installation
+
+```bash
+pip install pyiceberg
+```
+
+## Connection
+
+```python
+from pyiceberg.catalog import load_catalog
+
+catalog = load_catalog(
+ "rest",
+ uri="http://localhost:8181"
+)
+```
+
+## With Authentication
+
+```python
+catalog = load_catalog(
+ "rest",
+ uri="http://localhost:8181",
+ credential="client_id:client_secret"
+)
+```
+
+## Usage
+
+```python
+# List namespaces
+catalog.list_namespaces()
+
+# List tables
+catalog.list_tables("analytics")
+
+# Load table
+table = catalog.load_table("analytics.user_events")
+
+# Read data
+df = table.scan().to_pandas()
+```
diff --git a/docs-site/content/docs/en/integrations/spark.mdx b/docs-site/content/docs/en/integrations/spark.mdx
new file mode 100644
index 0000000..8642f9c
--- /dev/null
+++ b/docs-site/content/docs/en/integrations/spark.mdx
@@ -0,0 +1,45 @@
+---
+title: Apache Spark
+description: Connect Apache Spark to Bingsan REST Catalog
+---
+
+# Apache Spark Integration
+
+Connect Apache Spark to Bingsan for reading and writing Iceberg tables.
+
+## Configuration
+
+```python
+from pyspark.sql import SparkSession
+
+spark = SparkSession.builder \
+ .appName("Bingsan Example") \
+ .config("spark.sql.catalog.bingsan", "org.apache.iceberg.spark.SparkCatalog") \
+ .config("spark.sql.catalog.bingsan.type", "rest") \
+ .config("spark.sql.catalog.bingsan.uri", "http://localhost:8181") \
+ .getOrCreate()
+```
+
+## With Authentication
+
+```python
+# Add the credential on top of the catalog settings shown above
+spark = SparkSession.builder \
+    .config("spark.sql.catalog.bingsan.credential", "client_id:client_secret") \
+    .getOrCreate()
+```
+
+## Usage
+
+```sql
+-- List tables
+SHOW TABLES IN bingsan.analytics;
+
+-- Query table
+SELECT * FROM bingsan.analytics.user_events LIMIT 10;
+
+-- Create table
+CREATE TABLE bingsan.analytics.events (
+ id BIGINT,
+ data STRING
+) USING iceberg;
+```
diff --git a/docs-site/content/docs/en/integrations/trino.mdx b/docs-site/content/docs/en/integrations/trino.mdx
new file mode 100644
index 0000000..8f5b800
--- /dev/null
+++ b/docs-site/content/docs/en/integrations/trino.mdx
@@ -0,0 +1,39 @@
+---
+title: Trino
+description: Connect Trino to Bingsan REST Catalog
+---
+
+# Trino Integration
+
+Connect Trino to Bingsan for distributed SQL queries on Iceberg tables.
+
+## Connector Configuration
+
+Create `etc/catalog/iceberg.properties`:
+
+```properties
+connector.name=iceberg
+iceberg.catalog.type=rest
+iceberg.rest-catalog.uri=http://bingsan:8181
+```
+
+## With Authentication
+
+```properties
+connector.name=iceberg
+iceberg.catalog.type=rest
+iceberg.rest-catalog.uri=http://bingsan:8181
+iceberg.rest-catalog.security=OAUTH2
+iceberg.rest-catalog.oauth2.client-id=client_id
+iceberg.rest-catalog.oauth2.client-secret=client_secret
+```
+
+## Usage
+
+```sql
+-- List schemas
+SHOW SCHEMAS FROM iceberg;
+
+-- Query table
+SELECT * FROM iceberg.analytics.user_events LIMIT 10;
+```
diff --git a/docs-site/content/docs/en/performance/benchmarking.mdx b/docs-site/content/docs/en/performance/benchmarking.mdx
new file mode 100644
index 0000000..a2f4f26
--- /dev/null
+++ b/docs-site/content/docs/en/performance/benchmarking.mdx
@@ -0,0 +1,196 @@
+---
+title: Benchmarking
+description: Load testing with Go benchmarks and Apache Polaris Tools
+---
+
+import { Callout } from 'fumadocs-ui/components/callout';
+
+# Benchmarking
+
+Bingsan includes comprehensive benchmarking support using both Go's built-in benchmarks and the Apache Polaris Tools Gatling framework.
+
+## Quick Start
+
+### Go Benchmarks
+
+```bash
+# All benchmarks
+go test -bench=. -benchmem ./tests/benchmark/...
+
+# Specific benchmark
+go test -bench=BenchmarkTable -benchmem ./tests/benchmark/...
+
+# Pool benchmarks
+go test -bench=BenchmarkPool -benchmem ./tests/benchmark/...
+```
+
+### Load Testing with Polaris Tools
+
+```bash
+cd benchmarks
+
+# One-time setup
+make setup
+
+# Start Bingsan with OAuth2
+make start-bingsan
+
+# Create test dataset
+make create-dataset
+
+# Run read benchmark
+make read-benchmark
+
+# View results
+make report
+```
+
+## Go Benchmarks
+
+### Available Benchmarks
+
+| Benchmark | Description |
+|-----------|-------------|
+| `BenchmarkBaseline` | Baseline without pooling |
+| `BenchmarkPool` | With object pooling |
+| `BenchmarkTable` | Table operations |
+| `BenchmarkNamespace` | Namespace operations |
+| `BenchmarkConcurrent` | Concurrent load |
+| `BenchmarkMemory` | Memory allocation |
+
+### Compare Before/After
+
+```bash
+# Baseline
+go test -bench=. -benchmem ./tests/benchmark/... | tee baseline.txt
+
+# After changes
+go test -bench=. -benchmem ./tests/benchmark/... | tee optimized.txt
+
+# Compare
+benchstat baseline.txt optimized.txt
+```
+
+### With Profiling
+
+```bash
+# CPU profile
+go test -bench=BenchmarkTable -cpuprofile=cpu.prof ./tests/benchmark/...
+go tool pprof cpu.prof
+
+# Memory profile
+go test -bench=BenchmarkTable -memprofile=mem.prof ./tests/benchmark/...
+go tool pprof mem.prof
+```
+
+### Expected Results
+
+| Benchmark | Ops/sec | ns/op | B/op | allocs/op |
+|-----------|---------|-------|------|-----------|
+| TableMetadata | 100,000+ | <10,000 | <5,000 | <50 |
+| LargeSchema | 10,000+ | <100,000 | <50,000 | <200 |
+| PoolGet/Put | 20,000,000+ | <50 | 0 | 0 |
+
+## Polaris Tools Load Testing
+
+### Prerequisites
+
+- **Java 17+** - Required for Gatling
+- **Docker & Docker Compose** - For running Bingsan
+- **Make** - For running benchmark commands
+
+### Setup
+
+```bash
+cd benchmarks
+make setup
+```
+
+### Running Benchmarks
+
+| Command | Description |
+|---------|-------------|
+| `make read-benchmark` | Read-only operations |
+| `make read-update-benchmark` | Mixed read/write (80/20) |
+| `make create-commits-benchmark` | Commit throughput |
+| `make weighted-benchmark` | Weighted workload simulation |
+| `make full-benchmark` | All benchmarks sequentially |
+
+### Configuration
+
+Edit `config/bingsan.conf`:
+
+```text
+http.base-url = "http://localhost:8181"
+
+auth.client-id = "benchmark-client"
+auth.client-secret = "benchmark-secret"
+
+dataset {
+ namespace-width = 2
+ namespace-depth = 3
+ tables-per-namespace = 5
+ views-per-namespace = 3
+}
+
+workload.read-update-tree-dataset {
+ read-write-ratio = 0.8
+ throughput = "50/sec"
+ duration-in-minutes = 3
+}
+```
+
+## Polaris Compatibility Mode
+
+<Callout type="warn">
+**Production Warning**: Keep `polaris_enabled: false` in production. This mode is only for benchmark compatibility.
+</Callout>
+
+Polaris Tools expects a Polaris-compatible API. Enable via:
+
+```yaml
+compat:
+ polaris_enabled: true
+```
+
+## CI/CD Integration
+
+### GitHub Actions Example
+
+```yaml
+name: Benchmark
+
+on:
+ push:
+ branches: [main]
+
+jobs:
+ benchmark:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+          go-version: '1.25'
+      - name: Install benchstat
+        run: go install golang.org/x/perf/cmd/benchstat@latest
+      - name: Run benchmarks
+        run: |
+          go test -bench=. -benchmem ./tests/benchmark/... | tee results.txt
+      - name: Compare with baseline
+        run: benchstat baseline.txt results.txt
+```
+
+## Troubleshooting
+
+### Java Version Error
+
+```bash
+export JAVA_HOME=$(/usr/libexec/java_home -v 17)
+```
+
+### Connection Refused
+
+```bash
+docker ps | grep bingsan-bench
+make logs
+make quick-test
+```
diff --git a/docs-site/content/docs/en/performance/index.mdx b/docs-site/content/docs/en/performance/index.mdx
new file mode 100644
index 0000000..e475dfc
--- /dev/null
+++ b/docs-site/content/docs/en/performance/index.mdx
@@ -0,0 +1,55 @@
+---
+title: Performance
+description: Performance optimization and tuning for Bingsan
+---
+
+import { Callout } from 'fumadocs-ui/components/callout';
+
+# Performance
+
+Bingsan is optimized for high-throughput metadata operations with minimal memory overhead.
+
+## Overview
+
+Bingsan implements several performance optimizations:
+
+- **Object Pooling** - Reuses memory buffers to reduce GC pressure
+- **Distributed Locking** - PostgreSQL-based locks with configurable timeouts
+- **Optimized Serialization** - Uses goccy/go-json for fast JSON encoding
+
+## Performance Targets
+
+| Metric | Target | Typical |
+|--------|--------|---------|
+| Table metadata serialization | <50ms | ~10us |
+| Large schema (100+ cols) | <200ms | ~90us |
+| Memory allocation reduction | >=30% | 19-26% |
+| GC pause (p99) | <10ms | <5ms |
+| Pool hit rate | >=80% | ~100% |
+
+## Configuration
+
+Pool and locking settings are configured in `config.yaml`:
+
+```yaml
+catalog:
+ lock_timeout: 30s
+ lock_retry_interval: 100ms
+ max_lock_retries: 100
+```
+
+Or via environment variables:
+
+```bash
+ICEBERG_CATALOG_LOCK_TIMEOUT=30s
+ICEBERG_CATALOG_LOCK_RETRY_INTERVAL=100ms
+ICEBERG_CATALOG_MAX_LOCK_RETRIES=100
+```
+
+## Sections
+
+- [Object Pooling](/docs/performance/pooling) - Memory buffer reuse for reduced allocations
+- [Distributed Locking](/docs/performance/locking) - PostgreSQL-based locking with retry logic
+- [Benchmarking](/docs/performance/benchmarking) - Load testing with Apache Polaris Tools
+- [Metrics](/docs/performance/metrics) - Prometheus metrics for monitoring
+- [Tuning](/docs/performance/tuning) - Performance tuning guidelines
diff --git a/docs-site/content/docs/en/performance/locking.mdx b/docs-site/content/docs/en/performance/locking.mdx
new file mode 100644
index 0000000..0ecc7c3
--- /dev/null
+++ b/docs-site/content/docs/en/performance/locking.mdx
@@ -0,0 +1,167 @@
+---
+title: Distributed Locking
+description: PostgreSQL-based locking with retry logic
+---
+
+# Distributed Locking
+
+Bingsan uses PostgreSQL row-level locking with configurable timeouts and retry logic to handle concurrent modifications safely across multiple instances.
+
+## Overview
+
+When multiple Bingsan instances modify the same resource simultaneously, locking ensures:
+
+- **Consistency**: Only one operation modifies a resource at a time
+- **Isolation**: Operations don't see partial states
+- **Automatic Recovery**: Failed locks are retried with backoff
+
+## Configuration
+
+Configure locking via `config.yaml`:
+
+```yaml
+catalog:
+ lock_timeout: 30s
+ lock_retry_interval: 100ms
+ max_lock_retries: 100
+```
+
+### Environment Variables
+
+```bash
+ICEBERG_CATALOG_LOCK_TIMEOUT=30s
+ICEBERG_CATALOG_LOCK_RETRY_INTERVAL=100ms
+ICEBERG_CATALOG_MAX_LOCK_RETRIES=100
+```
+
+## How It Works
+
+### Lock Acquisition Flow
+
+1. Begin Transaction
+2. SET LOCAL lock_timeout = '30s'
+3. Execute operation (SELECT ... FOR UPDATE)
+4. On Success: Commit Transaction
+5. On Lock Timeout (55P03): Rollback, wait retry_interval, retry
+6. If max_retries exceeded: Return ErrLockTimeout
+
+### PostgreSQL Lock Timeout
+
+Each transaction sets `lock_timeout` locally:
+
+```sql
+BEGIN;
+SET LOCAL lock_timeout = '30000ms';
+SELECT * FROM tables WHERE id = $1 FOR UPDATE;
+-- ... perform update ...
+COMMIT;
+```
+
+## Configuration Options
+
+| Option | Default | Description |
+|--------|---------|-------------|
+| `lock_timeout` | 30s | Max time to wait for a single lock attempt |
+| `lock_retry_interval` | 100ms | Wait time between retry attempts |
+| `max_lock_retries` | 100 | Maximum retry attempts before failing |
+
+### Total Wait Time
+
+```
+max_wait = lock_timeout + (max_lock_retries × lock_retry_interval)
+ = 30s + (100 × 100ms)
+ = 40s
+```
+
+## Tuning Guidelines
+
+### High Contention Workloads
+
+```yaml
+catalog:
+ lock_timeout: 5s
+ lock_retry_interval: 50ms
+ max_lock_retries: 200
+```
+
+### Low Contention Workloads
+
+```yaml
+catalog:
+ lock_timeout: 60s
+ lock_retry_interval: 500ms
+ max_lock_retries: 10
+```
+
+### Batch Processing
+
+```yaml
+catalog:
+ lock_timeout: 120s
+ lock_retry_interval: 1s
+ max_lock_retries: 60
+```
+
+## Error Handling
+
+### ErrLockTimeout
+
+Returned when all retries are exhausted. Client should retry the operation.
+
+### Serialization Failures
+
+PostgreSQL serialization errors (40001) are also detected and handled.
+
+## Monitoring
+
+### Lock Wait Metrics
+
+Monitor lock contention via PostgreSQL:
+
+```sql
+-- Active locks
+SELECT * FROM pg_locks WHERE NOT granted;
+
+-- Lock wait statistics
+SELECT * FROM pg_stat_activity
+WHERE wait_event_type = 'Lock';
+```
+
+### Application Metrics
+
+```text
+# Lock timeout errors
+rate(iceberg_handler_errors_total{error="lock_timeout"}[5m])
+```
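+
+This error rate can feed an alert rule, for example (a sketch; the threshold is illustrative and should match your workload):
+
+```yaml
+- alert: HighLockTimeoutRate
+  expr: rate(iceberg_handler_errors_total{error="lock_timeout"}[5m]) > 1
+  for: 10m
+  labels:
+    severity: warning
+  annotations:
+    summary: "Table commits are hitting lock timeouts"
+```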
+
+## Best Practices
+
+### Keep Transactions Short
+
+```go
+// Good: Minimal work inside lock
+err := db.WithLock(ctx, cfg, func(tx pgx.Tx) error {
+    _, err := tx.Exec(ctx, "UPDATE tables SET ...")
+    return err
+})
+
+// Bad: External calls inside lock
+err := db.WithLock(ctx, cfg, func(tx pgx.Tx) error {
+ callExternalService() // May be slow!
+    _, err := tx.Exec(ctx, "UPDATE tables SET ...")
+    return err
+})
+```
+
+## Troubleshooting
+
+### Frequent Lock Timeouts
+
+**Causes**:
+- High write contention on same tables
+- Long-running transactions holding locks
+- Database performance issues
+
+**Solutions**:
+1. Increase `max_lock_retries`
+2. Decrease `lock_timeout` (fail faster, retry sooner)
+3. Check for slow queries holding locks
+4. Partition workloads across different tables
diff --git a/docs-site/content/docs/en/performance/meta.json b/docs-site/content/docs/en/performance/meta.json
new file mode 100644
index 0000000..bab1140
--- /dev/null
+++ b/docs-site/content/docs/en/performance/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "Performance",
+ "pages": ["index", "pooling", "locking", "benchmarking", "metrics", "tuning"]
+}
diff --git a/docs-site/content/docs/en/performance/metrics.mdx b/docs-site/content/docs/en/performance/metrics.mdx
new file mode 100644
index 0000000..3270fa2
--- /dev/null
+++ b/docs-site/content/docs/en/performance/metrics.mdx
@@ -0,0 +1,178 @@
+---
+title: Metrics
+description: Prometheus metrics for monitoring Bingsan performance
+---
+
+# Performance Metrics
+
+Bingsan exposes performance metrics via Prometheus at the `/metrics` endpoint.
+
+## Pool Metrics
+
+Object pool utilization metrics help monitor memory efficiency.
+
+### bingsan_pool_gets_total
+
+**Type**: Counter
+**Labels**: `pool`
+
+Total number of `Get()` operations on the pool.
+
+```text
+# Get rate per pool
+rate(bingsan_pool_gets_total[5m])
+
+# Total gets by pool type
+sum by (pool) (bingsan_pool_gets_total)
+```
+
+### bingsan_pool_returns_total
+
+**Type**: Counter
+**Labels**: `pool`
+
+Total number of successful `Put()` operations returning items to the pool.
+
+```text
+# Pool efficiency (returns/gets)
+rate(bingsan_pool_returns_total{pool="buffer"}[5m])
+/ rate(bingsan_pool_gets_total{pool="buffer"}[5m])
+```
+
+### bingsan_pool_discards_total
+
+**Type**: Counter
+**Labels**: `pool`
+
+Total number of discarded items (oversized or invalid).
+
+```text
+# Discard percentage
+rate(bingsan_pool_discards_total{pool="buffer"}[5m])
+/ rate(bingsan_pool_gets_total{pool="buffer"}[5m]) * 100
+```
+
+### bingsan_pool_misses_total
+
+**Type**: Counter
+**Labels**: `pool`
+
+Total number of pool misses requiring new allocations.
+
+```text
+# Hit rate (estimated)
+1 - (rate(bingsan_pool_misses_total[5m]) / rate(bingsan_pool_gets_total[5m]))
+```
+
+## Pool Labels
+
+| Label | Values | Description |
+|-------|--------|-------------|
+| `pool` | `buffer`, `bytes` | Pool type identifier |
+
+## Alerting Rules
+
+### Low Pool Hit Rate
+
+```yaml
+- alert: LowPoolHitRate
+ expr: |
+ (rate(bingsan_pool_returns_total{pool="buffer"}[5m])
+ / rate(bingsan_pool_gets_total{pool="buffer"}[5m])) < 0.8
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Pool hit rate below 80%"
+```
+
+### High Discard Rate
+
+```yaml
+- alert: HighPoolDiscardRate
+ expr: rate(bingsan_pool_discards_total{pool="buffer"}[5m]) > 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "High pool discard rate"
+```
+
+## Recording Rules
+
+Pre-compute expensive queries:
+
+```yaml
+groups:
+ - name: bingsan_pool_recording
+ interval: 30s
+ rules:
+ - record: bingsan:pool_hit_rate:5m
+ expr: |
+ rate(bingsan_pool_returns_total[5m])
+ / rate(bingsan_pool_gets_total[5m])
+
+ - record: bingsan:pool_discard_rate:5m
+ expr: rate(bingsan_pool_discards_total[5m])
+```
+
+## Interpreting Metrics
+
+### Healthy Pool
+
+```
+gets_total: 1,000,000
+returns_total: 1,000,000
+discards_total: 50
+misses_total: 100
+
+Utilization: 100% (returns = gets)
+Discard rate: 0.005%
+Miss rate: 0.01%
+```
+
+### Pool with Leaks
+
+```
+gets_total: 1,000,000
+returns_total: 800,000 ← 200,000 missing!
+discards_total: 100
+misses_total: 200,100 ← High misses
+
+Utilization: 80%
+Miss rate: 20%
+```
+
+**Action**: Check for missing `defer pool.Put()` calls.
+
+### Pool with Large Responses
+
+```
+gets_total: 1,000,000
+returns_total: 700,000
+discards_total: 300,000 ← High discards!
+misses_total: 50
+
+Utilization: 70%
+Discard rate: 30%
+```
+
+**Action**: Consider increasing `MaxBufferSize` if schemas are large.
+
+## Health Check Endpoints
+
+### /health
+
+Basic health check (returns 200 if healthy):
+
+```bash
+curl http://localhost:8181/health
+```
+
+### /metrics
+
+Prometheus metrics endpoint:
+
+```bash
+curl http://localhost:8181/metrics | grep bingsan_pool
+```
diff --git a/docs-site/content/docs/en/performance/pooling.mdx b/docs-site/content/docs/en/performance/pooling.mdx
new file mode 100644
index 0000000..5b48328
--- /dev/null
+++ b/docs-site/content/docs/en/performance/pooling.mdx
@@ -0,0 +1,125 @@
+---
+title: Object Pooling
+description: Memory buffer reuse for reduced allocations
+---
+
+# Object Pooling
+
+Bingsan uses `sync.Pool` from Go's standard library to reduce memory allocation pressure in hot paths.
+
+## Overview
+
+Two types of pools are implemented:
+
+| Pool | Purpose | Default Size | Max Size |
+|------|---------|--------------|----------|
+| **BufferPool** | JSON serialization buffers | 4 KB | 64 KB |
+| **BytePool** | OAuth token generation | 32 bytes | 32 bytes |
+
+## How It Works
+
+### BufferPool
+
+The `BufferPool` provides reusable `bytes.Buffer` instances for JSON serialization:
+
+```
+Request 1 ──► Get buffer ──► Serialize JSON ──► Return buffer ──► Pool
+ │ ▲
+ └──────────────────────────────────┘
+ Reused
+```
+
+**Key characteristics:**
+- Initial capacity: 4 KB (typical JSON metadata size)
+- Maximum size: 64 KB (oversized buffers are discarded)
+- Thread-safe via `sync.Pool`
+- Automatic reset on get
+
+### BytePool
+
+Fixed size: 32 bytes for OAuth access token generation.
+
+## Usage Patterns
+
+### In API Handlers
+
+```go
+func (h *Handler) GetTable(ctx *fiber.Ctx) error {
+ buf := pool.GetBuffer()
+ defer pool.PutBuffer(buf) // Always return!
+
+ encoder := json.NewEncoder(buf)
+ if err := encoder.Encode(table); err != nil {
+ return err
+ }
+
+ return ctx.Send(buf.Bytes())
+}
+```
+
+## Configuration
+
+| Constant | Value | Description |
+|----------|-------|-------------|
+| `DefaultBufferSize` | 4096 | Initial buffer capacity in bytes |
+| `MaxBufferSize` | 65536 | Maximum buffer size before discard |
+| `TokenSize` | 32 | Fixed size for token byte slices |
+
+## Best Practices
+
+### Always Use `defer`
+
+```go
+buf := pool.GetBuffer()
+defer pool.PutBuffer(buf) // Guaranteed return
+```
+
+### Don't Hold References
+
+```go
+// Wrong: Reference escapes
+data := buf.Bytes()
+pool.PutBuffer(buf)
+return data // data is now invalid!
+
+// Correct: Copy if needed
+data := make([]byte, buf.Len())
+copy(data, buf.Bytes())
+pool.PutBuffer(buf)
+return data
+```
+
+## Metrics
+
+Pool performance is exposed via Prometheus:
+
+| Metric | Type | Description |
+|--------|------|-------------|
+| `bingsan_pool_gets_total` | Counter | Total Get() operations |
+| `bingsan_pool_returns_total` | Counter | Total Put() operations |
+| `bingsan_pool_discards_total` | Counter | Oversized items discarded |
+| `bingsan_pool_misses_total` | Counter | New allocations (pool empty) |
+
+## Benchmarks
+
+```bash
+go test -bench=BenchmarkPool -benchmem ./tests/benchmark/...
+```
+
+Expected results:
+
+| Benchmark | Time | Allocs |
+|-----------|------|--------|
+| BufferPool.Get/Put | ~50ns | 0 |
+| BufferPool.Concurrent | ~100ns | 0 |
+| BytePool.Get/Put | ~30ns | 0 |
+
+## Troubleshooting
+
+### High Discard Rate
+
+If `bingsan_pool_discards_total` is increasing rapidly:
+
+1. **Cause**: Many large responses exceeding 64KB
+2. **Impact**: Reduced pool effectiveness
+3. **Solution**: Consider increasing `MaxBufferSize` for schemas with 100+ columns
diff --git a/docs-site/content/docs/en/performance/tuning.mdx b/docs-site/content/docs/en/performance/tuning.mdx
new file mode 100644
index 0000000..05add0a
--- /dev/null
+++ b/docs-site/content/docs/en/performance/tuning.mdx
@@ -0,0 +1,214 @@
+---
+title: Tuning
+description: Performance tuning guidelines for Bingsan
+---
+
+# Performance Tuning
+
+This guide covers how to tune Bingsan for optimal performance based on your workload characteristics.
+
+## Quick Reference
+
+| Workload | Lock Timeout | Retry Interval | Max Retries | Buffer Size |
+|----------|--------------|----------------|-------------|-------------|
+| Low latency | 5s | 50ms | 20 | 4KB |
+| High throughput | 30s | 100ms | 100 | 4KB |
+| Large schemas | 30s | 100ms | 100 | 8-16KB |
+| Batch processing | 120s | 1s | 60 | 4KB |
+
+## Workload Profiles
+
+### Low Latency
+
+Prioritize fast responses over throughput:
+
+```yaml
+catalog:
+ lock_timeout: 5s
+ lock_retry_interval: 50ms
+ max_lock_retries: 20
+
+server:
+ read_timeout: 10s
+ write_timeout: 10s
+
+database:
+ max_open_conns: 50
+ max_idle_conns: 25
+```
+
+### High Throughput
+
+Maximize requests per second:
+
+```yaml
+catalog:
+ lock_timeout: 30s
+ lock_retry_interval: 100ms
+ max_lock_retries: 100
+
+server:
+ read_timeout: 60s
+ write_timeout: 60s
+ idle_timeout: 300s
+
+database:
+ max_open_conns: 100
+ max_idle_conns: 50
+ conn_max_lifetime: 30m
+```
+
+### Batch Processing
+
+For bulk operations:
+
+```yaml
+catalog:
+ lock_timeout: 120s
+ lock_retry_interval: 1s
+ max_lock_retries: 60
+
+server:
+ read_timeout: 300s
+ write_timeout: 300s
+
+database:
+ max_open_conns: 25
+ conn_max_lifetime: 60m
+```
+
+## Tuning by Symptom
+
+### High Latency
+
+**Diagnosis:**
+```text
+# Check lock wait time
+rate(iceberg_db_wait_duration_seconds_total[5m])
+
+# Check pool discard rate
+rate(bingsan_pool_discards_total[5m])
+
+# Check connection saturation
+iceberg_db_connections_in_use / iceberg_db_connections_max
+```
+
+**Solutions:**
+1. Lock contention - Reduce `lock_timeout`, increase `max_lock_retries`
+2. Pool discards - Increase `MaxBufferSize` for large schemas
+3. Connection pool - Increase `max_open_conns`
+
+### High Memory Usage
+
+**Diagnosis:**
+```bash
+curl http://localhost:8181/debug/pprof/heap > heap.prof
+go tool pprof heap.prof
+```
+
+**Solutions:**
+1. Buffer leaks - Check all code paths return buffers
+2. Large buffers - Reduce `MaxBufferSize`
+3. Connection bloat - Reduce `max_open_conns`
+
+### Lock Timeout Errors
+
+**Diagnosis:**
+```sql
+SELECT * FROM pg_locks WHERE NOT granted;
+SELECT * FROM pg_stat_activity WHERE wait_event_type = 'Lock';
+```
+
+**Solutions:**
+1. High contention - Increase `max_lock_retries`
+2. Slow transactions - Keep transactions short
+3. Deadlocks - Bingsan handles these automatically
+
+## Database Tuning
+
+### PostgreSQL Settings
+
+```sql
+ALTER SYSTEM SET max_connections = 500;
+ALTER SYSTEM SET lock_timeout = '30s';
+ALTER SYSTEM SET statement_timeout = '60s';
+ALTER SYSTEM SET effective_cache_size = '12GB';
+ALTER SYSTEM SET shared_buffers = '4GB';
+```
+
+### Connection Pooling with PgBouncer
+
+```ini
+[databases]
+iceberg_catalog = host=postgres port=5432 dbname=iceberg_catalog
+
+[pgbouncer]
+pool_mode = transaction
+max_client_conn = 1000
+default_pool_size = 50
+```
+
+## Resource Sizing
+
+### Memory
+
+```
+memory_per_instance = base + (concurrent_requests × request_memory)
+ ≈ 50MB + (500 × 100KB)
+ ≈ 100MB typical
+ ≈ 200MB peak
+```
+
+### CPU
+
+```
+cpu_per_instance ≈ 0.2 cores idle
+ ≈ 1 core under load
+```
+
+### Instances
+
+```
+instances = (peak_rps / rps_per_instance) × 1.5
+```
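+
+Translated into container resources, these estimates suggest requests and limits along these lines (a sketch; tune against measured load):
+
+```yaml
+resources:
+  requests:
+    cpu: 250m
+    memory: 128Mi
+  limits:
+    cpu: "1"
+    memory: 256Mi
+```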
+
+## Profiling
+
+### CPU Profile
+
+```bash
+curl http://localhost:8181/debug/pprof/profile?seconds=30 > cpu.prof
+go tool pprof -http=:8080 cpu.prof
+```
+
+### Memory Profile
+
+```bash
+curl http://localhost:8181/debug/pprof/heap > heap.prof
+go tool pprof -http=:8080 heap.prof
+```
+
+### Trace
+
+```bash
+curl http://localhost:8181/debug/pprof/trace?seconds=5 > trace.out
+go tool trace trace.out
+```
+
+## Checklist
+
+### Pre-Production
+
+- [ ] Set appropriate `lock_timeout` for your workload
+- [ ] Configure `max_open_conns` based on expected load
+- [ ] Enable Prometheus metrics collection
+- [ ] Set up alerting for pool health
+- [ ] Run load tests with realistic data
+
+### Production Monitoring
+
+- [ ] Pool hit rate > 80%
+- [ ] Pool discard rate < 1%
+- [ ] Lock timeout rate < 1%
+- [ ] Connection utilization < 90%
+- [ ] GC pause p99 < 10ms
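+
+Some of these thresholds can be encoded as Prometheus alerts using the metrics referenced earlier (a sketch; thresholds are illustrative):
+
+```yaml
+- alert: HighDBConnectionUtilization
+  expr: iceberg_db_connections_in_use / iceberg_db_connections_max > 0.9
+  for: 10m
+  labels:
+    severity: warning
+  annotations:
+    summary: "Database connection pool utilization above 90%"
+```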
diff --git a/docs-site/content/docs/getting-started/_index.md b/docs-site/content/docs/getting-started/_index.md
deleted file mode 100644
index 90cb859..0000000
--- a/docs-site/content/docs/getting-started/_index.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: "Getting Started"
-weight: 1
-bookCollapseSection: true
----
-
-# Getting Started
-
-This section covers everything you need to get Bingsan up and running.
-
-## Prerequisites
-
-Before installing Bingsan, ensure you have the following:
-
-- **Go 1.25+** (for building from source)
-- **PostgreSQL 15+** (metadata storage)
-- **Docker & Docker Compose** (recommended for development)
-
-## Sections
-
-- [Quick Start]({{< relref "/docs/getting-started/quick-start" >}}) - Get running in 5 minutes
-- [Installation]({{< relref "/docs/getting-started/installation" >}}) - Detailed installation options
-- [First Steps]({{< relref "/docs/getting-started/first-steps" >}}) - Create your first namespace and table
diff --git a/docs-site/content/docs/ko/api/index.mdx b/docs-site/content/docs/ko/api/index.mdx
new file mode 100644
index 0000000..dde6342
--- /dev/null
+++ b/docs-site/content/docs/ko/api/index.mdx
@@ -0,0 +1,82 @@
+---
+title: API 레퍼런스
+description: Apache Iceberg REST Catalog 사양을 준수하는 Bingsan의 완전한 REST API 문서
+---
+
+# API 레퍼런스
+
+Apache Iceberg REST Catalog 사양을 준수하는 Bingsan의 완전한 REST API 문서입니다.
+
+## 기본 URL
+
+```
+http://localhost:8181/v1
+```
+
+## 인증
+
+인증이 활성화되면 요청에 Bearer 토큰을 포함합니다:
+
+```bash
+curl -H "Authorization: Bearer <token>" http://localhost:8181/v1/namespaces
+```
+
+자세한 내용은 [인증 설정](/docs/configuration/auth)을 참조하세요.
+
+## Content Type
+
+본문이 있는 모든 요청에는 다음을 사용해야 합니다:
+
+```
+Content-Type: application/json
+```
+
+## API 섹션
+
+### 핵심 작업
+
+- [네임스페이스](/docs/api/namespaces) - 네임스페이스 CRUD 작업
+- [테이블](/docs/api/tables) - 테이블 관리 및 커밋
+- [뷰](/docs/api/views) - 뷰 관리
+
+## 오류 응답
+
+모든 오류 응답은 다음 형식을 따릅니다:
+
+```json
+{
+ "error": {
+ "message": "오류 설명",
+ "type": "ErrorType",
+ "code": 400
+ }
+}
+```
+
+### 일반적인 오류 유형
+
+| 코드 | 유형 | 설명 |
+|------|------|-------------|
+| 400 | `BadRequestException` | 잘못된 요청 매개변수 |
+| 401 | `UnauthorizedException` | 인증 누락 또는 무효 |
+| 403 | `ForbiddenException` | 권한 거부 |
+| 404 | `NoSuchNamespaceException` | 네임스페이스를 찾을 수 없음 |
+| 404 | `NoSuchTableException` | 테이블을 찾을 수 없음 |
+| 404 | `NoSuchViewException` | 뷰를 찾을 수 없음 |
+| 409 | `AlreadyExistsException` | 리소스가 이미 존재함 |
+| 409 | `CommitFailedException` | 낙관적 잠금 실패 |
+| 500 | `ServerError` | 내부 서버 오류 |
+
+## 카탈로그 프리픽스
+
+Bingsan은 멀티 카탈로그 배포를 위한 프리픽스 API 경로를 지원합니다:
+
+```
+/v1/{prefix}/namespaces
+/v1/{prefix}/namespaces/{namespace}/tables
+```
+
+예시:
+```bash
+curl http://localhost:8181/v1/my-catalog/namespaces
+```
diff --git a/docs-site/content/docs/ko/api/meta.json b/docs-site/content/docs/ko/api/meta.json
new file mode 100644
index 0000000..02d8570
--- /dev/null
+++ b/docs-site/content/docs/ko/api/meta.json
@@ -0,0 +1,9 @@
+{
+ "title": "API 레퍼런스",
+ "pages": [
+ "index",
+ "namespaces",
+ "tables",
+ "views"
+ ]
+}
diff --git a/docs-site/content/docs/ko/api/namespaces.mdx b/docs-site/content/docs/ko/api/namespaces.mdx
new file mode 100644
index 0000000..c4a0bdc
--- /dev/null
+++ b/docs-site/content/docs/ko/api/namespaces.mdx
@@ -0,0 +1,269 @@
+---
+title: 네임스페이스 API
+description: 데이터 컨테이너 관리를 위한 네임스페이스 CRUD 작업
+---
+
+# 네임스페이스 API
+
+네임스페이스는 테이블과 뷰의 컨테이너로, 기존 시스템의 데이터베이스나 스키마와 유사합니다.
+
+## 네임스페이스 목록 조회
+
+모든 네임스페이스를 조회하며, 선택적으로 상위 네임스페이스로 필터링할 수 있습니다.
+
+### 요청
+
+```http
+GET /v1/namespaces
+GET /v1/namespaces?parent={parent}
+GET /v1/namespaces?pageToken={token}&pageSize={size}
+```
+
+### 쿼리 파라미터
+
+| 파라미터 | 타입 | 필수 | 설명 |
+|-----------|------|----------|-------------|
+| `parent` | string | 아니오 | 하위 항목을 필터링할 상위 네임스페이스 |
+| `pageToken` | string | 아니오 | 이전 응답의 페이지네이션 토큰 |
+| `pageSize` | integer | 아니오 | 최대 결과 수 (기본값: 100) |
+
+### 응답
+
+```json
+{
+ "namespaces": [
+ ["analytics"],
+ ["analytics", "events"],
+ ["raw"]
+ ],
+ "next-page-token": "eyJvZmZzZXQiOjEwMH0="
+}
+```
+
+### 예시
+
+```bash
+# 모든 네임스페이스 조회
+curl http://localhost:8181/v1/namespaces
+
+# "analytics"의 하위 네임스페이스 조회
+curl "http://localhost:8181/v1/namespaces?parent=analytics"
+```
+
+---
+
+## 네임스페이스 생성
+
+새 네임스페이스를 생성합니다.
+
+### 요청
+
+```http
+POST /v1/namespaces
+```
+
+### 요청 본문
+
+```json
+{
+ "namespace": ["analytics", "events"],
+ "properties": {
+ "owner": "data-team",
+ "description": "이벤트 분석 데이터"
+ }
+}
+```
+
+### 요청 필드
+
+| 필드 | 타입 | 필수 | 설명 |
+|-------|------|----------|-------------|
+| `namespace` | array[string] | 예 | 이름 부분의 배열로 된 네임스페이스 식별자 |
+| `properties` | object | 아니오 | 네임스페이스의 키-값 속성 |
+
+### 응답
+
+```json
+{
+ "namespace": ["analytics", "events"],
+ "properties": {
+ "owner": "data-team",
+ "description": "이벤트 분석 데이터"
+ }
+}
+```
+
+### 오류
+
+| 코드 | 오류 | 설명 |
+|------|-------|-------------|
+| 400 | `BadRequestException` | 잘못된 네임스페이스 형식 |
+| 409 | `AlreadyExistsException` | 네임스페이스가 이미 존재함 |
+
+### 예시
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces \
+ -H "Content-Type: application/json" \
+ -d '{
+ "namespace": ["analytics"],
+ "properties": {
+ "owner": "platform-team"
+ }
+ }'
+```
+
+---
+
+## 네임스페이스 조회
+
+속성을 포함한 네임스페이스 메타데이터를 로드합니다.
+
+### 요청
+
+```http
+GET /v1/namespaces/{namespace}
+```
+
+### 경로 파라미터
+
+| 파라미터 | 타입 | 설명 |
+|-----------|------|-------------|
+| `namespace` | string | URL 인코딩된 네임스페이스 |
+
+### 응답
+
+```json
+{
+ "namespace": ["analytics", "events"],
+ "properties": {
+ "owner": "data-team",
+ "description": "이벤트 분석 데이터",
+ "created_at": "2024-01-15T10:30:00Z"
+ }
+}
+```
+
+### 오류
+
+| 코드 | 오류 | 설명 |
+|------|-------|-------------|
+| 404 | `NoSuchNamespaceException` | 네임스페이스가 존재하지 않음 |
+
+### 예시
+
+```bash
+# 단순 네임스페이스
+curl http://localhost:8181/v1/namespaces/analytics
+
+# 중첩 네임스페이스 (구분자를 URL 인코딩)
+curl http://localhost:8181/v1/namespaces/analytics%1Fevents
+```
+
+---
+
+## 네임스페이스 존재 확인
+
+메타데이터를 로드하지 않고 네임스페이스가 존재하는지 확인합니다.
+
+### 요청
+
+```http
+HEAD /v1/namespaces/{namespace}
+```
+
+### 응답
+
+- **200 OK**: 네임스페이스 존재
+- **404 Not Found**: 네임스페이스가 존재하지 않음
+
+### 예시
+
+```bash
+curl -I http://localhost:8181/v1/namespaces/analytics
+```
+
+---
+
+## 네임스페이스 속성 업데이트
+
+네임스페이스의 속성을 업데이트하거나 제거합니다.
+
+### 요청
+
+```http
+POST /v1/namespaces/{namespace}/properties
+```
+
+### 요청 본문
+
+```json
+{
+ "updates": {
+ "owner": "new-team",
+ "retention": "90d"
+ },
+ "removals": ["description", "deprecated_key"]
+}
+```
+
+### 요청 필드
+
+| 필드 | 타입 | 필수 | 설명 |
+|-------|------|----------|-------------|
+| `updates` | object | 아니오 | 추가하거나 업데이트할 속성 |
+| `removals` | array[string] | 아니오 | 제거할 속성 키 |
+
+### 응답
+
+```json
+{
+ "updated": ["owner", "retention"],
+ "removed": ["description"],
+ "missing": ["deprecated_key"]
+}
+```
+
+### 예시
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces/analytics/properties \
+ -H "Content-Type: application/json" \
+ -d '{
+ "updates": {"owner": "platform-team"},
+ "removals": ["old_property"]
+ }'
+```
+
+---
+
+## 네임스페이스 삭제
+
+빈 네임스페이스를 삭제합니다.
+
+### 요청
+
+```http
+DELETE /v1/namespaces/{namespace}
+```
+
+### 응답
+
+- **204 No Content**: 네임스페이스가 성공적으로 삭제됨
+
+### 오류
+
+| 코드 | 오류 | 설명 |
+|------|-------|-------------|
+| 404 | `NoSuchNamespaceException` | 네임스페이스가 존재하지 않음 |
+| 409 | `NamespaceNotEmptyException` | 네임스페이스에 테이블 또는 뷰가 포함됨 |
+
+### 예시
+
+```bash
+curl -X DELETE http://localhost:8181/v1/namespaces/analytics
+```
+
+
+네임스페이스를 삭제하기 전에 먼저 모든 테이블과 뷰를 삭제해야 합니다.
+
diff --git a/docs-site/content/docs/ko/api/tables.mdx b/docs-site/content/docs/ko/api/tables.mdx
new file mode 100644
index 0000000..ec4c146
--- /dev/null
+++ b/docs-site/content/docs/ko/api/tables.mdx
@@ -0,0 +1,355 @@
+---
+title: 테이블 API
+description: CRUD 및 커밋을 포함한 테이블 관리 작업
+---
+
+# 테이블 API
+
+테이블은 Iceberg의 주요 데이터 컨테이너로, 스키마, 파티션 사양 및 메타데이터를 포함합니다.
+
+## 테이블 목록 조회
+
+네임스페이스의 모든 테이블을 조회합니다.
+
+### 요청
+
+```http
+GET /v1/namespaces/{namespace}/tables
+GET /v1/namespaces/{namespace}/tables?pageToken={token}&pageSize={size}
+```
+
+### 쿼리 파라미터
+
+| 파라미터 | 타입 | 필수 | 설명 |
+|-----------|------|----------|-------------|
+| `pageToken` | string | 아니오 | 페이지네이션 토큰 |
+| `pageSize` | integer | 아니오 | 최대 결과 수 (기본값: 100) |
+
+### 응답
+
+```json
+{
+ "identifiers": [
+ {"namespace": ["analytics"], "name": "user_events"},
+ {"namespace": ["analytics"], "name": "page_views"}
+ ],
+ "next-page-token": null
+}
+```
+
+### 예시
+
+```bash
+curl http://localhost:8181/v1/namespaces/analytics/tables
+```
+
+---
+
+## 테이블 생성
+
+새 Iceberg 테이블을 생성합니다.
+
+### 요청
+
+```http
+POST /v1/namespaces/{namespace}/tables
+```
+
+### 요청 본문
+
+```json
+{
+ "name": "user_events",
+ "location": "s3://bucket/warehouse/analytics/user_events",
+ "schema": {
+ "type": "struct",
+ "schema-id": 0,
+ "fields": [
+ {"id": 1, "name": "event_id", "required": true, "type": "string"},
+ {"id": 2, "name": "user_id", "required": true, "type": "long"},
+ {"id": 3, "name": "event_type", "required": true, "type": "string"},
+ {"id": 4, "name": "event_time", "required": true, "type": "timestamptz"}
+ ]
+ },
+ "partition-spec": {
+ "spec-id": 0,
+ "fields": [
+ {"source-id": 4, "field-id": 1000, "name": "event_day", "transform": "day"}
+ ]
+ },
+ "properties": {
+ "format-version": "2",
+ "write.parquet.compression-codec": "zstd"
+ }
+}
+```
+
+### 요청 필드
+
+| 필드 | 타입 | 필수 | 설명 |
+|-------|------|----------|-------------|
+| `name` | string | 예 | 테이블 이름 |
+| `location` | string | 아니오 | 사용자 정의 테이블 위치 (생략 시 웨어하우스 기본값 사용) |
+| `schema` | object | 예 | Iceberg 스키마 정의 |
+| `partition-spec` | object | 아니오 | 파티션 사양 |
+| `write-order` | object | 아니오 | 쓰기용 정렬 순서 |
+| `stage-create` | boolean | 아니오 | true인 경우 스테이지된 테이블 생성 (데이터 파일 없음) |
+| `properties` | object | 아니오 | 테이블 속성 |
+
+### 응답
+
+전체 테이블 메타데이터를 반환합니다:
+
+```json
+{
+ "metadata-location": "s3://bucket/warehouse/analytics/user_events/metadata/00000-uuid.metadata.json",
+ "metadata": {
+ "format-version": 2,
+ "table-uuid": "550e8400-e29b-41d4-a716-446655440000",
+ "location": "s3://bucket/warehouse/analytics/user_events",
+ "last-updated-ms": 1705312200000,
+ "schema": { "..." },
+ "current-schema-id": 0,
+ "partition-spec": [],
+ "properties": {}
+ }
+}
+```
+
+### 예시
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces/analytics/tables \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "events",
+ "schema": {
+ "type": "struct",
+ "schema-id": 0,
+ "fields": [
+ {"id": 1, "name": "id", "required": true, "type": "long"},
+ {"id": 2, "name": "data", "required": false, "type": "string"}
+ ]
+ }
+ }'
+```
+
+---
+
+## 테이블 로드
+
+테이블의 현재 메타데이터를 로드합니다.
+
+### 요청
+
+```http
+GET /v1/namespaces/{namespace}/tables/{table}
+GET /v1/namespaces/{namespace}/tables/{table}?snapshots={snapshots}
+```
+
+### 쿼리 파라미터
+
+| 파라미터 | 타입 | 설명 |
+|-----------|------|-------------|
+| `snapshots` | string | 모든 스냅샷 포함은 `all`, 참조된 것만은 `refs` |
+
+### 응답
+
+```json
+{
+ "metadata-location": "s3://bucket/warehouse/analytics/events/metadata/00001-uuid.metadata.json",
+ "metadata": {
+ "format-version": 2,
+ "table-uuid": "550e8400-e29b-41d4-a716-446655440000",
+ "location": "s3://bucket/warehouse/analytics/events",
+ "current-snapshot-id": 123456789,
+ "snapshots": [
+ {
+ "snapshot-id": 123456789,
+ "timestamp-ms": 1705312200000,
+ "summary": {
+ "operation": "append",
+ "added-data-files": "10",
+ "added-records": "1000"
+ }
+ }
+ ]
+ }
+}
+```
+
+### 예시
+
+```bash
+curl http://localhost:8181/v1/namespaces/analytics/tables/events
+```
+
+---
+
+## 테이블 존재 확인
+
+테이블이 존재하는지 확인합니다.
+
+### 요청
+
+```http
+HEAD /v1/namespaces/{namespace}/tables/{table}
+```
+
+### 응답
+
+- **200 OK**: 테이블 존재
+- **404 Not Found**: 테이블이 존재하지 않음
+
+### 예시
+
+```bash
+curl -I http://localhost:8181/v1/namespaces/analytics/tables/events
+```
+
+---
+
+## 테이블 업데이트 커밋
+
+테이블에 업데이트를 커밋합니다 (스키마 변경, 새 스냅샷, 속성 업데이트).
+
+### 요청
+
+```http
+POST /v1/namespaces/{namespace}/tables/{table}
+```
+
+### 요구사항
+
+| 타입 | 필드 | 설명 |
+|------|--------|-------------|
+| `assert-create` | - | 테이블이 존재하지 않음을 확인 |
+| `assert-table-uuid` | `uuid` | 테이블 UUID가 일치하는지 확인 |
+| `assert-ref-snapshot-id` | `ref`, `snapshot-id` | ref가 스냅샷을 가리키는지 확인 |
+| `assert-current-schema-id` | `current-schema-id` | 현재 스키마 ID 확인 |
+| `assert-default-spec-id` | `default-spec-id` | 기본 파티션 사양 확인 |
+
+### 업데이트 액션
+
+| 액션 | 필드 | 설명 |
+|--------|--------|-------------|
+| `upgrade-format-version` | `format-version` | 포맷 버전 업그레이드 |
+| `add-schema` | `schema` | 새 스키마 추가 |
+| `set-current-schema` | `schema-id` | 현재 스키마 설정 |
+| `add-partition-spec` | `spec` | 파티션 사양 추가 |
+| `set-default-spec` | `spec-id` | 기본 파티션 사양 설정 |
+| `add-snapshot` | `snapshot` | 스냅샷 추가 |
+| `set-snapshot-ref` | `ref-name`, `type`, `snapshot-id` | 스냅샷 참조 설정 |
+| `remove-snapshots` | `snapshot-ids` | 스냅샷 제거 |
+| `set-properties` | `updates` | 속성 업데이트 |
+| `remove-properties` | `removals` | 속성 제거 |
+
+### 오류
+
+| 코드 | 오류 | 설명 |
+|------|-------|-------------|
+| 404 | `NoSuchTableException` | 테이블을 찾을 수 없음 |
+| 409 | `CommitFailedException` | 요구사항이 충족되지 않음 |
+
+---
+
+## 테이블 삭제
+
+테이블을 삭제합니다.
+
+### 요청
+
+```http
+DELETE /v1/namespaces/{namespace}/tables/{table}
+DELETE /v1/namespaces/{namespace}/tables/{table}?purgeRequested={true|false}
+```
+
+### 쿼리 파라미터
+
+| 파라미터 | 타입 | 설명 |
+|-----------|------|-------------|
+| `purgeRequested` | boolean | true인 경우 데이터 파일 삭제 (기본값: false) |
+
+### 응답
+
+- **204 No Content**: 테이블 삭제됨
+
+### 예시
+
+```bash
+# 테이블 삭제, 데이터 파일 유지
+curl -X DELETE http://localhost:8181/v1/namespaces/analytics/tables/events
+
+# 테이블 삭제 및 데이터 퍼지
+curl -X DELETE "http://localhost:8181/v1/namespaces/analytics/tables/events?purgeRequested=true"
+```
+
+---
+
+## 테이블 등록
+
+메타데이터 파일에서 기존 테이블을 등록합니다.
+
+### 요청
+
+```http
+POST /v1/namespaces/{namespace}/register
+```
+
+### 요청 본문
+
+```json
+{
+ "name": "imported_table",
+ "metadata-location": "s3://bucket/path/to/metadata.json"
+}
+```
+
+### 예시
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces/analytics/register \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "imported",
+ "metadata-location": "s3://bucket/tables/imported/metadata/00000.metadata.json"
+ }'
+```
+
+---
+
+## 테이블 이름 변경
+
+테이블의 이름을 변경하거나 다른 네임스페이스로 이동합니다.
+
+### 요청
+
+```http
+POST /v1/tables/rename
+```
+
+### 요청 본문
+
+```json
+{
+ "source": {
+ "namespace": ["analytics"],
+ "name": "old_name"
+ },
+ "destination": {
+ "namespace": ["analytics"],
+ "name": "new_name"
+ }
+}
+```
+
+### 예시
+
+```bash
+curl -X POST http://localhost:8181/v1/tables/rename \
+ -H "Content-Type: application/json" \
+ -d '{
+ "source": {"namespace": ["analytics"], "name": "events"},
+ "destination": {"namespace": ["analytics"], "name": "user_events"}
+ }'
+```
diff --git a/docs-site/content/docs/ko/api/views.mdx b/docs-site/content/docs/ko/api/views.mdx
new file mode 100644
index 0000000..84839a4
--- /dev/null
+++ b/docs-site/content/docs/ko/api/views.mdx
@@ -0,0 +1,335 @@
+---
+title: 뷰 API
+description: 명명된 SQL 쿼리를 위한 뷰 관리 작업
+---
+
+# 뷰 API
+
+뷰는 테이블처럼 참조할 수 있는 명명된 SQL 쿼리를 정의하는 방법을 제공합니다.
+
+## 뷰 목록 조회
+
+네임스페이스의 모든 뷰를 조회합니다.
+
+### 요청
+
+```http
+GET /v1/namespaces/{namespace}/views
+GET /v1/namespaces/{namespace}/views?pageToken={token}&pageSize={size}
+```
+
+### 쿼리 파라미터
+
+| 파라미터 | 타입 | 필수 | 설명 |
+|-----------|------|----------|-------------|
+| `pageToken` | string | 아니오 | 페이지네이션 토큰 |
+| `pageSize` | integer | 아니오 | 최대 결과 수 (기본값: 100) |
+
+### 응답
+
+```json
+{
+ "identifiers": [
+ {"namespace": ["analytics"], "name": "daily_events"},
+ {"namespace": ["analytics"], "name": "user_summary"}
+ ],
+ "next-page-token": null
+}
+```
+
+### 예시
+
+```bash
+curl http://localhost:8181/v1/namespaces/analytics/views
+```
+
+---
+
+## 뷰 생성
+
+새 뷰를 생성합니다.
+
+### 요청
+
+```http
+POST /v1/namespaces/{namespace}/views
+```
+
+### 요청 본문
+
+```json
+{
+ "name": "daily_events",
+ "schema": {
+ "type": "struct",
+ "schema-id": 0,
+ "fields": [
+ {"id": 1, "name": "event_date", "required": true, "type": "date"},
+ {"id": 2, "name": "event_count", "required": true, "type": "long"},
+ {"id": 3, "name": "unique_users", "required": true, "type": "long"}
+ ]
+ },
+ "view-version": {
+ "version-id": 1,
+ "schema-id": 0,
+ "timestamp-ms": 1705312200000,
+ "summary": {
+ "engine-name": "spark",
+ "engine-version": "3.5.0"
+ },
+ "representations": [
+ {
+ "type": "sql",
+ "sql": "SELECT DATE(event_time) AS event_date, COUNT(*) AS event_count, COUNT(DISTINCT user_id) AS unique_users FROM analytics.user_events GROUP BY DATE(event_time)",
+ "dialect": "spark"
+ }
+ ],
+ "default-catalog": "bingsan",
+ "default-namespace": ["analytics"]
+ },
+ "properties": {
+ "owner": "data-team"
+ }
+}
+```
+
+### 요청 필드
+
+| 필드 | 타입 | 필수 | 설명 |
+|-------|------|----------|-------------|
+| `name` | string | 예 | 뷰 이름 |
+| `location` | string | 아니오 | 사용자 정의 뷰 위치 |
+| `schema` | object | 예 | 뷰의 출력 스키마 |
+| `view-version` | object | 예 | SQL 정의가 포함된 뷰 버전 |
+| `properties` | object | 아니오 | 뷰 속성 |
+
+### 뷰 버전 필드
+
+| 필드 | 타입 | 필수 | 설명 |
+|-------|------|----------|-------------|
+| `version-id` | integer | 예 | 버전 식별자 |
+| `schema-id` | integer | 예 | 이 버전의 스키마 ID |
+| `timestamp-ms` | long | 예 | 생성 타임스탬프 |
+| `summary` | object | 아니오 | 엔진 정보 |
+| `representations` | array | 예 | SQL 표현 |
+| `default-catalog` | string | 아니오 | 정규화되지 않은 이름의 기본 카탈로그 |
+| `default-namespace` | array | 아니오 | 기본 네임스페이스 |
+
+### 예시
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces/analytics/views \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "daily_summary",
+ "schema": {
+ "type": "struct",
+ "schema-id": 0,
+ "fields": [
+ {"id": 1, "name": "day", "required": true, "type": "date"},
+ {"id": 2, "name": "total", "required": true, "type": "long"}
+ ]
+ },
+ "view-version": {
+ "version-id": 1,
+ "schema-id": 0,
+ "timestamp-ms": 1705312200000,
+ "representations": [
+ {
+ "type": "sql",
+ "sql": "SELECT DATE(ts) as day, COUNT(*) as total FROM events GROUP BY 1",
+ "dialect": "spark"
+ }
+ ]
+ }
+ }'
+```
+
+---
+
+## 뷰 로드
+
+뷰의 현재 메타데이터를 로드합니다.
+
+### 요청
+
+```http
+GET /v1/namespaces/{namespace}/views/{view}
+```
+
+### 응답
+
+```json
+{
+ "metadata-location": "s3://bucket/warehouse/views/daily_events/metadata/00001-uuid.metadata.json",
+ "metadata": {
+ "view-uuid": "550e8400-e29b-41d4-a716-446655440000",
+ "format-version": 1,
+ "location": "s3://bucket/warehouse/views/daily_events",
+ "current-version-id": 1,
+ "versions": [
+ {
+ "version-id": 1,
+ "schema-id": 0,
+ "timestamp-ms": 1705312200000,
+ "representations": [...]
+ }
+ ],
+ "properties": {
+ "owner": "data-team"
+ }
+ }
+}
+```
+
+### 예시
+
+```bash
+curl http://localhost:8181/v1/namespaces/analytics/views/daily_events
+```
+
+---
+
+## 뷰 존재 확인
+
+뷰가 존재하는지 확인합니다.
+
+### 요청
+
+```http
+HEAD /v1/namespaces/{namespace}/views/{view}
+```
+
+### 응답
+
+- **200 OK**: 뷰 존재
+- **404 Not Found**: 뷰가 존재하지 않음
+
+### 예시
+
+```bash
+curl -I http://localhost:8181/v1/namespaces/analytics/views/daily_events
+```
+
+---
+
+## 뷰 교체
+
+뷰의 정의를 새 버전으로 교체합니다.
+
+### 요청
+
+```http
+POST /v1/namespaces/{namespace}/views/{view}
+```
+
+### 요구사항
+
+| 타입 | 필드 | 설명 |
+|------|--------|-------------|
+| `assert-view-uuid` | `uuid` | 뷰 UUID가 일치하는지 확인 |
+
+### 업데이트 액션
+
+| 액션 | 필드 | 설명 |
+|--------|--------|-------------|
+| `assign-uuid` | `uuid` | 뷰 UUID 할당 |
+| `add-schema` | `schema` | 새 스키마 추가 |
+| `add-view-version` | `view-version` | 뷰 버전 추가 |
+| `set-current-view-version` | `view-version-id` | 현재 버전 설정 |
+| `set-location` | `location` | 뷰 위치 설정 |
+| `set-properties` | `updates` | 속성 업데이트 |
+| `remove-properties` | `removals` | 속성 제거 |
+
+### 오류
+
+| 코드 | 오류 | 설명 |
+|------|-------|-------------|
+| 404 | `NoSuchViewException` | 뷰를 찾을 수 없음 |
+| 409 | `CommitFailedException` | 요구사항이 충족되지 않음 |
+
+---
+
+## 뷰 삭제
+
+뷰를 삭제합니다.
+
+### 요청
+
+```http
+DELETE /v1/namespaces/{namespace}/views/{view}
+```
+
+### 응답
+
+- **204 No Content**: 뷰 삭제됨
+
+### 예시
+
+```bash
+curl -X DELETE http://localhost:8181/v1/namespaces/analytics/views/daily_events
+```
+
+---
+
+## 뷰 이름 변경
+
+뷰의 이름을 변경하거나 다른 네임스페이스로 이동합니다.
+
+### 요청
+
+```http
+POST /v1/views/rename
+```
+
+### 요청 본문
+
+```json
+{
+ "source": {
+ "namespace": ["analytics"],
+ "name": "old_view"
+ },
+ "destination": {
+ "namespace": ["analytics"],
+ "name": "new_view"
+ }
+}
+```
+
+### 예시
+
+```bash
+curl -X POST http://localhost:8181/v1/views/rename \
+ -H "Content-Type: application/json" \
+ -d '{
+ "source": {"namespace": ["analytics"], "name": "daily_summary"},
+ "destination": {"namespace": ["analytics"], "name": "daily_aggregates"}
+ }'
+```
+
+---
+
+## SQL 방언
+
+뷰는 표현에서 여러 SQL 방언을 지원합니다:
+
+| 방언 | 설명 |
+|---------|-------------|
+| `spark` | Apache Spark SQL |
+| `trino` | Trino SQL |
+| `presto` | Presto SQL |
+| `flink` | Apache Flink SQL |
+| `hive` | Apache Hive SQL |
+| `dremio` | Dremio SQL |
+
+뷰는 다른 엔진을 위해 여러 표현을 가질 수 있습니다:
+
+```json
+{
+ "representations": [
+ {"type": "sql", "sql": "SELECT ...", "dialect": "spark"},
+ {"type": "sql", "sql": "SELECT ...", "dialect": "trino"}
+ ]
+}
+```
diff --git a/docs-site/content/docs/ko/architecture/data-model.mdx b/docs-site/content/docs/ko/architecture/data-model.mdx
new file mode 100644
index 0000000..ce29f2e
--- /dev/null
+++ b/docs-site/content/docs/ko/architecture/data-model.mdx
@@ -0,0 +1,147 @@
+---
+title: 데이터 모델
+description: Bingsan의 데이터베이스 스키마 및 메타데이터 저장
+---
+
+# 데이터 모델
+
+Bingsan은 모든 카탈로그 메타데이터를 PostgreSQL에 저장합니다.
+
+## 개요
+
+```
+┌─────────────────┐ ┌─────────────────┐
+│ namespaces │ │ tables │
+├─────────────────┤ ├─────────────────┤
+│ id │────▶│ namespace_id │
+│ name │ │ name │
+│ properties │ │ metadata_loc │
+└─────────────────┘ │ metadata │
+ └─────────────────┘
+ │
+ ▼
+┌─────────────────┐ ┌─────────────────┐
+│ views │ │ scan_plans │
+├─────────────────┤ ├─────────────────┤
+│ namespace_id │ │ table_id │
+│ name │ │ plan_id │
+│ metadata_loc │ │ status │
+│ metadata │ │ tasks │
+└─────────────────┘ └─────────────────┘
+```
+
+## 테이블
+
+### namespaces
+
+네임스페이스 메타데이터를 저장합니다.
+
+| 컬럼 | 타입 | 설명 |
+|------|------|------|
+| `id` | BIGSERIAL | 기본 키 |
+| `name` | TEXT[] | 배열 형태의 네임스페이스 이름 |
+| `properties` | JSONB | 네임스페이스 속성 |
+| `created_at` | TIMESTAMPTZ | 생성 타임스탬프 |
+| `updated_at` | TIMESTAMPTZ | 마지막 업데이트 타임스탬프 |
+
+### tables
+
+Iceberg 테이블 메타데이터를 저장합니다.
+
+| 컬럼 | 타입 | 설명 |
+|------|------|------|
+| `id` | BIGSERIAL | 기본 키 |
+| `namespace_id` | BIGINT | namespaces 외래 키 |
+| `name` | TEXT | 테이블 이름 |
+| `table_uuid` | UUID | Iceberg 테이블 UUID |
+| `metadata_location` | TEXT | 현재 메타데이터 파일 경로 |
+| `metadata` | JSONB | 캐시된 테이블 메타데이터 (선택) |
+
+### views
+
+테이블과 유사한 구조로 Iceberg 뷰 메타데이터를 저장합니다.
+
+### scan_plans
+
+서버 측 플래닝을 위한 스캔 플랜 상태를 저장합니다.
+
+## 메타데이터 저장 전략
+
+### 데이터베이스 vs 오브젝트 스토리지
+
+Bingsan은 하이브리드 접근 방식을 사용합니다:
+
+**PostgreSQL 저장:**
+- 네임스페이스 메타데이터
+- 테이블/뷰 레지스트리 (이름, UUID, 위치)
+- 스캔 플랜 상태
+- 캐시된 메타데이터 (선택)
+
+**오브젝트 스토리지 (S3/GCS) 저장:**
+- 전체 Iceberg 메타데이터 JSON 파일
+- 매니페스트 리스트
+- 매니페스트
+- 데이터 파일
+
+## 잠금 모델
+
+### Advisory 락
+
+PostgreSQL advisory 락으로 일관성을 보장합니다:
+
+```sql
+-- 네임스페이스 레벨 락
+SELECT pg_advisory_lock(hashtext('ns:' || $1));
+
+-- 테이블 레벨 락
+SELECT pg_advisory_xact_lock($1);
+```
+
+락은 최소 필요 시간 동안만 유지됩니다.
+
+## 마이그레이션
+
+Bingsan은 스키마 마이그레이션에 [golang-migrate](https://github.com/golang-migrate/migrate)를 사용합니다.
+
+### 자동 마이그레이션
+
+시작 시 마이그레이션이 자동으로 실행됩니다.
+
+### 수동 마이그레이션
+
+```bash
+# 현재 버전 확인
+migrate -database "postgres://..." -path migrations version
+
+# 마이그레이션 적용
+migrate -database "postgres://..." -path migrations up
+
+# 마이그레이션 롤백
+migrate -database "postgres://..." -path migrations down 1
+```
+
+## 인덱스
+
+일반적인 쿼리 패턴에 최적화된 인덱스:
+
+| 쿼리 | 사용 인덱스 |
+|------|------------|
+| 네임스페이스 목록 | `idx_namespaces_name` (GIN) |
+| 이름으로 네임스페이스 조회 | `namespaces_name_key` (UNIQUE) |
+| 네임스페이스 내 테이블 목록 | `idx_tables_namespace` |
+| 이름으로 테이블 조회 | `tables_namespace_id_name_key` (UNIQUE) |
+| UUID로 테이블 찾기 | `idx_tables_uuid` |
+
+## 백업 및 복구
+
+### PostgreSQL 백업
+
+```bash
+# 전체 백업
+pg_dump -h localhost -U iceberg iceberg_catalog > backup.sql
+
+# 복구
+psql -h localhost -U iceberg iceberg_catalog < backup.sql
+```
+
+전체 Iceberg 메타데이터가 오브젝트 스토리지에 있으므로, 필요 시 메타데이터 파일에서 데이터베이스를 재구축할 수 있습니다.
diff --git a/docs-site/content/docs/ko/architecture/index.mdx b/docs-site/content/docs/ko/architecture/index.mdx
new file mode 100644
index 0000000..137fd8c
--- /dev/null
+++ b/docs-site/content/docs/ko/architecture/index.mdx
@@ -0,0 +1,99 @@
+---
+title: 아키텍처
+description: 배포 계획 및 튜닝을 위한 Bingsan 아키텍처 이해
+---
+
+# 아키텍처
+
+Bingsan의 아키텍처를 이해하면 배포 계획, 성능 튜닝, 문제 해결에 도움이 됩니다.
+
+## 개요
+
+Bingsan은 Apache Iceberg REST Catalog 명세를 구현한 상태 비저장(stateless) Go 애플리케이션입니다. 모든 영구 상태는 PostgreSQL에 저장됩니다.
+
+```
+┌─────────────────────────────────────────────────────────────┐
+│ 클라이언트 │
+│ (Spark, Trino, Flink, PyIceberg 등) │
+└─────────────────────────┬───────────────────────────────────┘
+ │ REST API (HTTP)
+┌─────────────────────────▼───────────────────────────────────┐
+│ Bingsan 클러스터 │
+│ ┌─────────┐ ┌─────────┐ ┌─────────┐ │
+│ │ 노드 1 │ │ 노드 2 │ │ 노드 N │ (상태 비저장) │
+│ │ :8181 │ │ :8181 │ │ :8181 │ │
+│ └────┬────┘ └────┬────┘ └────┬────┘ │
+│ └────────────┼────────────┘ │
+│ │ 분산 잠금 │
+└────────────────────┼────────────────────────────────────────┘
+ │
+ ┌────────────┴────────────┐
+┌───────▼───────┐ ┌────────▼────────┐
+│ PostgreSQL │ │ S3 / GCS │
+│ (메타데이터) │ │ (데이터 레이크) │
+└───────────────┘ └─────────────────┘
+```
+
+## 핵심 컴포넌트
+
+### HTTP 서버
+
+- [Fiber](https://gofiber.io/) (fasthttp) 기반
+- 고성능, 저메모리 HTTP 처리
+- HTTP/1.1 keep-alive 지원
+
+### 데이터베이스 레이어
+
+- 모든 메타데이터 저장에 PostgreSQL 사용
+- pgx/v5를 통한 커넥션 풀링
+- 자동 스키마 마이그레이션
+- 분산 잠금을 위한 Advisory 락
+
+### 스토리지 연동
+
+- 테이블용 스토리지 경로 생성
+- 클라이언트 데이터 접근을 위한 자격 증명 제공
+- S3, GCS, 로컬 파일시스템 지원
+
+### 이벤트 스트리밍
+
+- WebSocket 기반 실시간 이벤트
+- Publish/Subscribe 모델
+- 네임스페이스 레벨 필터링
+
+## 설계 원칙
+
+### 상태 비저장 노드
+
+각 Bingsan 인스턴스는 상태를 저장하지 않습니다:
+
+- 모든 상태는 PostgreSQL에 저장
+- 노드 간 통신 불필요
+- 어떤 노드든 모든 요청 처리 가능
+- 수평 확장 용이
+
+### 낙관적 동시성 제어
+
+테이블 커밋은 낙관적 동시성 제어를 사용합니다:
+
+1. 클라이언트가 현재 메타데이터 읽기
+2. 클라이언트가 요구사항과 함께 변경 제출
+3. 서버가 현재 상태에 대해 요구사항 검증
+4. 유효하면 변경 원자적 적용
+
+### 분산 잠금
+
+설정 가능한 타임아웃과 함께 PostgreSQL 행 레벨 잠금으로 동시 수정 방지:
+
+- `SELECT ... FOR UPDATE`를 통한 행 레벨 락
+- 트랜잭션별 설정 가능한 `lock_timeout`
+- 지수 백오프를 통한 자동 재시도
+- 락 충돌의 원활한 처리
+
+자세한 설정은 [분산 잠금](/docs/performance/locking)을 참조하세요.
+
+## 섹션
+
+- [요청 흐름](/docs/architecture/request-flow) - 요청 처리 방식
+- [데이터 모델](/docs/architecture/data-model) - 데이터베이스 스키마 및 메타데이터 저장
+- [확장성](/docs/architecture/scalability) - 확장 전략 및 한계
diff --git a/docs-site/content/docs/ko/architecture/meta.json b/docs-site/content/docs/ko/architecture/meta.json
new file mode 100644
index 0000000..be3be97
--- /dev/null
+++ b/docs-site/content/docs/ko/architecture/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "아키텍처",
+ "pages": ["index", "request-flow", "data-model", "scalability"]
+}
diff --git a/docs-site/content/docs/ko/architecture/request-flow.mdx b/docs-site/content/docs/ko/architecture/request-flow.mdx
new file mode 100644
index 0000000..a5fe70b
--- /dev/null
+++ b/docs-site/content/docs/ko/architecture/request-flow.mdx
@@ -0,0 +1,123 @@
+---
+title: 요청 흐름
+description: Bingsan의 HTTP 요청 처리 방식
+---
+
+# 요청 흐름
+
+Bingsan이 요청을 처리하는 방식을 이해하면 디버깅과 성능 최적화에 도움이 됩니다.
+
+## HTTP 요청 라이프사이클
+
+```
+┌──────────────┐
+│ 클라이언트 │
+└──────┬───────┘
+ │ HTTP 요청
+ ▼
+┌──────────────────────────────────────────────┐
+│ Fiber HTTP 서버 │
+├──────────────────────────────────────────────┤
+│ 1. Request ID 미들웨어 │
+│ 2. Recovery 미들웨어 (패닉 처리) │
+│ 3. Prometheus 메트릭 미들웨어 │
+│ 4. CORS 미들웨어 │
+│ 5. 로거 미들웨어 │
+│ 6. 인증 미들웨어 (활성화 시) │
+├──────────────────────────────────────────────┤
+│ 라우트 핸들러 │
+├──────────────────────────────────────────────┤
+│ 데이터베이스 작업 │
+├──────────────────────────────────────────────┤
+│ 이벤트 발행 │
+└──────────────────────────────────────────────┘
+ │
+ ▼ HTTP 응답
+┌──────────────┐
+│ 클라이언트 │
+└──────────────┘
+```
+
+## 미들웨어 스택
+
+### 1. Request ID
+
+각 요청에 고유 ID를 할당하여 추적합니다. `X-Request-ID` 헤더로 반환됩니다.
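+
+참고로, Fiber의 requestid 미들웨어로 같은 동작을 구성하면 아래와 같습니다. Bingsan이 정확히 이 미들웨어를 그대로 사용하는지는 구현에 따라 다를 수 있는 예시입니다.
+
+```go
+// Fiber requestid 미들웨어로 요청 ID를 부여하는 스케치입니다.
+package main
+
+import (
+	"log"
+
+	"github.com/gofiber/fiber/v2"
+	"github.com/gofiber/fiber/v2/middleware/requestid"
+)
+
+func main() {
+	app := fiber.New()
+
+	// 각 요청에 고유 ID를 부여하고 X-Request-ID 응답 헤더로 돌려줍니다.
+	app.Use(requestid.New(requestid.Config{
+		Header: "X-Request-ID",
+	}))
+
+	app.Get("/health", func(c *fiber.Ctx) error {
+		return c.JSON(fiber.Map{"status": "ok"})
+	})
+
+	log.Fatal(app.Listen(":8181"))
+}
+```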
+
+### 2. Recovery
+
+패닉을 잡아 500 Internal Server Error를 반환합니다.
+
+### 3. Prometheus 메트릭
+
+요청 메트릭을 기록합니다:
+- `iceberg_catalog_http_requests_total` - 카운터
+- `iceberg_catalog_http_request_duration_seconds` - 히스토그램
+
+### 4. CORS
+
+설정 가능한 오리진과 메서드로 크로스 오리진 요청을 허용합니다.
+
+### 5. 로거
+
+메서드, 경로, 상태, 지연시간, 요청 ID 등 요청 상세 정보를 로깅합니다.
+
+### 6. 인증
+
+활성화 시 Bearer 토큰을 검증합니다. 토큰이 유효하지 않거나 없으면 401을 반환합니다.
+
+## 테이블 커밋 흐름
+
+가장 복잡한 작업은 테이블 커밋입니다:
+
+```
+┌──────────────┐
+│ 클라이언트 │
+└──────┬───────┘
+ │ POST /v1/namespaces/{ns}/tables/{table}
+ ▼
+┌──────────────────────────────────────────────┐
+│ CommitTable 핸들러 │
+├──────────────────────────────────────────────┤
+│ 1. 요청 본문 파싱 │
+│ 2. 테이블 식별자 검증 │
+│ 3. 테이블 락 획득 (PostgreSQL advisory) │
+│ 4. 트랜잭션 시작 │
+│ 5. 현재 메타데이터 로드 │
+│ 6. 요구사항 확인 │
+│ 7. 업데이트 적용 │
+│ 8. 새 메타데이터 파일 쓰기 (S3/GCS) │
+│ 9. 데이터베이스 업데이트 │
+│ 10. 트랜잭션 커밋 │
+│ 11. 락 해제 │
+│ 12. 이벤트 발행 │
+└──────────────────────────────────────────────┘
+```
+
+## 에러 처리
+
+### 클라이언트 에러 (4xx)
+
+- **400 Bad Request**: 유효하지 않은 JSON, 필수 필드 누락
+- **401 Unauthorized**: 토큰 누락 또는 유효하지 않음
+- **404 Not Found**: 네임스페이스/테이블 존재하지 않음
+- **409 Conflict**: 요구사항 확인 실패
+
+### 서버 에러 (5xx)
+
+- **500 Internal Server Error**: 예기치 않은 에러, 데이터베이스 장애
+- **503 Service Unavailable**: 데이터베이스 불가용
+
+## 성능 고려사항
+
+### 커넥션 풀링
+
+pgx/v5를 통해 데이터베이스 연결이 풀링되며 최대/최소 연결 수 설정 가능합니다.
+
+### 요청당 고루틴
+
+각 요청은 자체 고루틴에서 실행되며 자동 스케줄링과 효율적인 메모리 사용이 이루어집니다.
+
+### JSON 직렬화
+
+더 빠른 JSON 인코딩/디코딩을 위해 [goccy/go-json](https://github.com/goccy/go-json)을 사용합니다.
diff --git a/docs-site/content/docs/ko/architecture/scalability.mdx b/docs-site/content/docs/ko/architecture/scalability.mdx
new file mode 100644
index 0000000..08a582c
--- /dev/null
+++ b/docs-site/content/docs/ko/architecture/scalability.mdx
@@ -0,0 +1,175 @@
+---
+title: 확장성
+description: Bingsan의 확장 전략 및 용량 계획
+---
+
+# 확장성
+
+Bingsan은 일관성을 유지하면서 수평 확장이 가능하도록 설계되었습니다.
+
+## 수평 확장
+
+### 상태 비저장 아키텍처
+
+각 Bingsan 인스턴스는 상태를 저장하지 않습니다:
+
+- 모든 상태는 PostgreSQL에 저장
+- 노드 간 통신 불필요
+- 어떤 노드든 모든 요청 처리 가능
+- 간단한 로드 밸런싱 (라운드 로빈으로 충분)
+
+```
+┌──────────────────────────────────────────┐
+│ 로드 밸런서 │
+└────────────────────┬─────────────────────┘
+ ┌───────────────┼───────────────┐
+ ▼ ▼ ▼
+┌─────────┐ ┌─────────┐ ┌─────────┐
+│ 노드 1 │ │ 노드 2 │ │ 노드 N │
+└────┬────┘ └────┬────┘ └────┬────┘
+ └─────────────┼─────────────┘
+ ▼
+ ┌───────────────┐
+ │ PostgreSQL │
+ └───────────────┘
+```
+
+### Kubernetes 배포
+
+Kubernetes HPA로 확장:
+
+```yaml
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: bingsan
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: bingsan
+ minReplicas: 3
+ maxReplicas: 50
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 70
+```
+
+## PostgreSQL 확장
+
+### 커넥션 풀 크기
+
+모든 인스턴스의 총 커넥션:
+
+```
+total_connections = max_open_conns × num_instances
+```
+
+예: 25 커넥션 × 10 인스턴스 = 250 커넥션
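+
+`max_open_conns`가 인스턴스당 풀 상한으로 적용된다고 가정하면, pgxpool 설정은 대략 아래와 같이 스케치할 수 있습니다 (함수 이름은 예시입니다).
+
+```go
+// 인스턴스당 커넥션 수를 제한하는 pgxpool 설정 스케치입니다.
+package db
+
+import (
+	"context"
+
+	"github.com/jackc/pgx/v5/pgxpool"
+)
+
+func NewPool(ctx context.Context, dsn string, maxOpenConns int32) (*pgxpool.Pool, error) {
+	cfg, err := pgxpool.ParseConfig(dsn)
+	if err != nil {
+		return nil, err
+	}
+
+	// 전체 커넥션 수 = MaxConns × 인스턴스 수이므로,
+	// PostgreSQL max_connections를 넘지 않도록 인스턴스 수에 맞춰 조정합니다.
+	cfg.MaxConns = maxOpenConns
+	cfg.MinConns = 2
+
+	return pgxpool.NewWithConfig(ctx, cfg)
+}
+```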
+
+### PgBouncer를 통한 커넥션 풀링
+
+많은 인스턴스를 위해 PgBouncer 사용:
+
+```ini
+[databases]
+iceberg_catalog = host=postgres port=5432 dbname=iceberg_catalog
+
+[pgbouncer]
+pool_mode = transaction
+max_client_conn = 1000
+default_pool_size = 50
+```
+
+## 성능 특성
+
+### 지연시간
+
+일반적인 작업 지연시간:
+
+| 작업 | p50 | p99 |
+|------|-----|-----|
+| 네임스페이스 목록 | 2ms | 10ms |
+| 네임스페이스 조회 | 1ms | 5ms |
+| 테이블 목록 | 3ms | 15ms |
+| 테이블 로드 | 5ms | 25ms |
+| 테이블 생성 | 20ms | 100ms |
+| 테이블 커밋 | 30ms | 150ms |
+
+### 처리량
+
+단일 노드 용량 (하드웨어에 따라 다름):
+
+- **읽기**: 5,000-10,000 요청/초
+- **쓰기**: 500-2,000 요청/초
+
+노드 추가로 선형 확장 가능합니다.
+
+### 리소스 사용량
+
+인스턴스당:
+
+| 리소스 | 일반 | 피크 |
+|--------|------|------|
+| 메모리 | 50-100 MB | 200 MB |
+| CPU | 0.2 코어 | 1 코어 |
+| 고루틴 | 100-500 | 2,000 |
+
+## 병목 및 해결책
+
+### PostgreSQL 커넥션
+
+**증상**: `too many connections` 에러
+
+**해결책**:
+- PgBouncer 사용
+- 인스턴스당 `max_open_conns` 줄이기
+- PostgreSQL `max_connections` 늘리기
+
+### 락 경합
+
+**증상**: 높은 커밋 지연시간, 타임아웃 에러
+
+**해결책**:
+- `lock_timeout` 늘리기
+- 같은 테이블에 대한 쓰기 빈도 줄이기
+- 테이블 간 워크로드 분산
+
+## 용량 계획
+
+### 인스턴스 추정
+
+```
+instances = (peak_requests_per_second / requests_per_instance) × 1.5
+```
+
+예: 10,000 RPS ÷ 5,000 RPS/인스턴스 = 2 인스턴스, 2 × 1.5 = 3 인스턴스
+
+## 고가용성
+
+### 다중 인스턴스
+
+HA를 위해 최소 3개 인스턴스 실행:
+
+```yaml
+spec:
+ replicas: 3
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ maxSurge: 1
+```
+
+### PostgreSQL HA
+
+자동 장애 조치가 가능한 관리형 PostgreSQL 사용:
+- AWS RDS Multi-AZ
+- GCP Cloud SQL HA
+- Azure Database for PostgreSQL
+
+또는 자체 관리 HA를 위해 Patroni/Stolon으로 배포합니다.
diff --git a/docs-site/content/docs/ko/configuration/auth.mdx b/docs-site/content/docs/ko/configuration/auth.mdx
new file mode 100644
index 0000000..9d71964
--- /dev/null
+++ b/docs-site/content/docs/ko/configuration/auth.mdx
@@ -0,0 +1,80 @@
+---
+title: 인증 설정
+description: Bingsan의 인증 및 권한 부여 구성
+---
+
+# 인증 설정
+
+카탈로그 API의 인증 및 권한 부여를 구성합니다.
+
+## 옵션
+
+```yaml
+auth:
+ enabled: false
+ token_expiry: 1h
+ signing_key: "change-me-in-production"
+
+ oauth2:
+ enabled: false
+ issuer: ""
+
+ api_key:
+ enabled: false
+```
+
+## 참조
+
+| 옵션 | 타입 | 기본값 | 설명 |
+|--------|------|---------|-------------|
+| `enabled` | boolean | `false` | 인증 활성화 |
+| `token_expiry` | duration | `1h` | 액세스 토큰 수명 |
+| `signing_key` | string | - | 토큰 서명 비밀 키 |
+| `oauth2.enabled` | boolean | `false` | OAuth2 엔드포인트 활성화 |
+
+## 인증 활성화
+
+```yaml
+auth:
+ enabled: true
+ token_expiry: 1h
+ signing_key: "your-secure-256-bit-secret-key"
+```
+
+
+프로덕션에서는 반드시 signing_key를 변경하세요.
+
+
+보안 키 생성:
+
+```bash
+openssl rand -hex 32
+```
+
+## OAuth2 토큰 교환
+
+```bash
+curl -X POST http://localhost:8181/v1/oauth/tokens \
+ -H "Content-Type: application/x-www-form-urlencoded" \
+ -d "grant_type=client_credentials" \
+ -d "client_id=my-client" \
+ -d "client_secret=my-secret"
+```
+
+## 클라이언트 설정
+
+### Apache Spark
+
+```properties
+spark.sql.catalog.bingsan.credential=client_id:client_secret
+```
+
+### PyIceberg
+
+```python
+catalog = load_catalog(
+ "rest",
+ uri="http://localhost:8181",
+ credential="client_id:client_secret"
+)
+```
diff --git a/docs-site/content/docs/ko/configuration/database.mdx b/docs-site/content/docs/ko/configuration/database.mdx
new file mode 100644
index 0000000..c603290
--- /dev/null
+++ b/docs-site/content/docs/ko/configuration/database.mdx
@@ -0,0 +1,52 @@
+---
+title: 데이터베이스 설정
+description: 메타데이터 저장을 위한 PostgreSQL 연결 구성
+---
+
+# 데이터베이스 설정
+
+메타데이터 저장을 위한 PostgreSQL 연결을 구성합니다.
+
+## 옵션
+
+```yaml
+database:
+ host: localhost
+ port: 5432
+ user: iceberg
+ password: iceberg
+ database: iceberg_catalog
+ ssl_mode: disable
+ max_open_conns: 25
+ max_idle_conns: 5
+```
+
+## 참조
+
+| 옵션 | 타입 | 기본값 | 설명 |
+|--------|------|---------|-------------|
+| `host` | string | `localhost` | PostgreSQL 서버 호스트명 |
+| `port` | integer | `5432` | PostgreSQL 서버 포트 |
+| `user` | string | `iceberg` | 데이터베이스 사용자 |
+| `password` | string | `iceberg` | 데이터베이스 비밀번호 |
+| `database` | string | `iceberg_catalog` | 데이터베이스 이름 |
+| `ssl_mode` | string | `disable` | SSL 모드 |
+| `max_open_conns` | integer | `25` | 최대 열린 연결 수 |
+
+## SSL 모드
+
+| 모드 | 설명 |
+|------|-------------|
+| `disable` | SSL 없음 (개발 전용) |
+| `require` | SSL 사용, 인증서 검증 안 함 |
+| `verify-full` | 인증서 및 호스트명 검증 |
+
+## 데이터베이스 설정
+
+```sql
+CREATE DATABASE iceberg_catalog;
+CREATE USER iceberg WITH PASSWORD 'your-secure-password';
+GRANT ALL PRIVILEGES ON DATABASE iceberg_catalog TO iceberg;
+```
+
+Bingsan은 시작 시 자동으로 데이터베이스 마이그레이션을 실행합니다.
diff --git a/docs-site/content/docs/ko/configuration/index.mdx b/docs-site/content/docs/ko/configuration/index.mdx
new file mode 100644
index 0000000..b5a60af
--- /dev/null
+++ b/docs-site/content/docs/ko/configuration/index.mdx
@@ -0,0 +1,58 @@
+---
+title: 설정
+description: Bingsan의 완전한 설정 가이드
+---
+
+# 설정
+
+Bingsan은 YAML 파일 또는 환경 변수를 통해 설정됩니다.
+
+## 설정 소스
+
+설정은 다음 순서로 로드됩니다 (나중 소스가 이전 소스를 덮어씀):
+
+1. 기본값
+2. `config.yaml` 파일
+3. 환경 변수 (`ICEBERG_` 접두사)
+
+## 빠른 참조
+
+```yaml
+server:
+ host: 0.0.0.0
+ port: 8181
+ debug: false
+
+database:
+ host: localhost
+ port: 5432
+ user: iceberg
+ password: iceberg
+ database: iceberg_catalog
+
+storage:
+ type: s3
+ warehouse: s3://bucket/warehouse
+
+auth:
+ enabled: false
+ token_expiry: 1h
+```
+
+## 설정 섹션
+
+- [서버](/docs/configuration/server) - HTTP 서버 설정
+- [데이터베이스](/docs/configuration/database) - PostgreSQL 연결
+- [스토리지](/docs/configuration/storage) - S3/GCS/로컬 스토리지 백엔드
+- [인증](/docs/configuration/auth) - OAuth2 및 API 키 인증
+
+## 환경 변수
+
+모든 설정 옵션은 `ICEBERG_` 접두사와 중첩 키에 밑줄을 사용하여 환경 변수로 설정할 수 있습니다:
+
+| 설정 경로 | 환경 변수 |
+|-------------|---------------------|
+| `server.port` | `ICEBERG_SERVER_PORT` |
+| `database.host` | `ICEBERG_DATABASE_HOST` |
+| `storage.s3.region` | `ICEBERG_STORAGE_S3_REGION` |
+| `auth.enabled` | `ICEBERG_AUTH_ENABLED` |
diff --git a/docs-site/content/docs/ko/configuration/meta.json b/docs-site/content/docs/ko/configuration/meta.json
new file mode 100644
index 0000000..cc34a95
--- /dev/null
+++ b/docs-site/content/docs/ko/configuration/meta.json
@@ -0,0 +1,10 @@
+{
+ "title": "설정",
+ "pages": [
+ "index",
+ "server",
+ "database",
+ "storage",
+ "auth"
+ ]
+}
diff --git a/docs-site/content/docs/ko/configuration/server.mdx b/docs-site/content/docs/ko/configuration/server.mdx
new file mode 100644
index 0000000..a031092
--- /dev/null
+++ b/docs-site/content/docs/ko/configuration/server.mdx
@@ -0,0 +1,55 @@
+---
+title: 서버 설정
+description: Bingsan의 HTTP 서버 설정 구성
+---
+
+# 서버 설정
+
+HTTP 서버 설정을 구성합니다.
+
+## 옵션
+
+```yaml
+server:
+ host: 0.0.0.0
+ port: 8181
+ debug: false
+ read_timeout: 30s
+ write_timeout: 30s
+ idle_timeout: 120s
+```
+
+## 참조
+
+| 옵션 | 타입 | 기본값 | 설명 |
+|--------|------|---------|-------------|
+| `host` | string | `0.0.0.0` | 바인딩할 IP 주소 |
+| `port` | integer | `8181` | 리스닝 포트 |
+| `debug` | boolean | `false` | 디버그 모드 활성화 |
+| `read_timeout` | duration | `30s` | 요청 읽기 최대 시간 |
+| `write_timeout` | duration | `30s` | 응답 쓰기 최대 시간 |
+| `idle_timeout` | duration | `120s` | 유휴 연결 최대 시간 |
+
+## 환경 변수
+
+```bash
+ICEBERG_SERVER_HOST=0.0.0.0
+ICEBERG_SERVER_PORT=8181
+ICEBERG_SERVER_DEBUG=false
+```
+
+## 프로덕션 권장 사항
+
+```yaml
+server:
+ host: 0.0.0.0
+ port: 8181
+ debug: false
+ read_timeout: 60s
+ write_timeout: 60s
+ idle_timeout: 300s
+```
+
+
+프로덕션 환경에서는 디버그 모드를 활성화하지 마세요.
+
diff --git a/docs-site/content/docs/ko/configuration/storage.mdx b/docs-site/content/docs/ko/configuration/storage.mdx
new file mode 100644
index 0000000..15326f9
--- /dev/null
+++ b/docs-site/content/docs/ko/configuration/storage.mdx
@@ -0,0 +1,77 @@
+---
+title: 스토리지 설정
+description: Iceberg 데이터 파일용 스토리지 백엔드 구성
+---
+
+# 스토리지 설정
+
+Iceberg 데이터 파일용 스토리지 백엔드를 구성합니다.
+
+## 개요
+
+Bingsan은 세 가지 스토리지 백엔드를 지원합니다:
+
+- **S3** - Amazon S3 및 호환 서비스 (MinIO, R2)
+- **GCS** - Google Cloud Storage
+- **Local** - 로컬 파일시스템 (개발 전용)
+
+## 옵션
+
+```yaml
+storage:
+ type: s3
+ warehouse: s3://bucket/warehouse
+
+ s3:
+ endpoint: ""
+ region: us-east-1
+ access_key_id: ""
+ secret_access_key: ""
+ bucket: warehouse
+ use_path_style: false
+```
+
+## Amazon S3
+
+IAM 역할 사용 (권장):
+
+```yaml
+storage:
+ type: s3
+ warehouse: s3://my-bucket/warehouse
+
+ s3:
+ region: us-east-1
+ bucket: my-bucket
+ # 자격 증명 불필요 - 인스턴스 역할 사용
+```
+
+## MinIO
+
+```yaml
+storage:
+ type: s3
+ warehouse: s3://warehouse/data
+
+ s3:
+ endpoint: "http://minio:9000"
+ access_key_id: "minioadmin"
+ secret_access_key: "minioadmin"
+ bucket: warehouse
+ use_path_style: true
+```
+
+## 로컬 스토리지
+
+
+로컬 스토리지는 개발 및 테스트 전용입니다.
+
+
+```yaml
+storage:
+ type: local
+ warehouse: file:///data/warehouse
+
+ local:
+ root_path: /data/warehouse
+```
diff --git a/docs-site/content/docs/ko/contributing/code-style.mdx b/docs-site/content/docs/ko/contributing/code-style.mdx
new file mode 100644
index 0000000..4ec22cc
--- /dev/null
+++ b/docs-site/content/docs/ko/contributing/code-style.mdx
@@ -0,0 +1,155 @@
+---
+title: 코드 스타일
+description: 코드 스타일 가이드라인 및 린팅
+---
+
+# 코드 스타일
+
+Bingsan은 Go 모범 사례와 프로젝트별 추가 규칙을 따릅니다.
+
+## 포매팅
+
+프로젝트는 **gofumpt** (더 엄격한 gofmt)를 사용합니다:
+
+```bash
+# 설치
+go install mvdan.cc/gofumpt@latest
+
+# 포맷
+gofumpt -w .
+```
+
+## 린팅
+
+golangci-lint는 `.golangci.yml`에 구성되어 있습니다:
+
+```bash
+# 린터 실행
+make lint
+
+# 자동 수정
+make lint-fix
+```
+
+### 활성화된 린터
+
+주요 린터:
+- `gofumpt` - 더 엄격한 포매팅
+- `gosec` - 보안 검사
+- `gocritic` - 코드 품질
+- `revive` - 스타일 검사
+- `errcheck` - 에러 처리
+- `staticcheck` - 정적 분석
+
+## Import 구성
+
+Import는 다음 순서로 그룹화해야 합니다:
+
+```go
+import (
+ // 표준 라이브러리
+ "context"
+ "fmt"
+
+ // 서드파티 패키지
+ "github.com/gofiber/fiber/v2"
+ "github.com/jackc/pgx/v5"
+
+ // 로컬 패키지
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/db"
+)
+```
+
+## 네이밍 규칙
+
+### 파일
+
+- 소문자와 언더스코어: `table_handler.go`
+- 테스트 파일: `table_handler_test.go`
+
+### 함수
+
+- 내보내는 함수: `PascalCase` - `CreateTable`
+- 내보내지 않는 함수: `camelCase` - `validateSchema`
+
+### 변수
+
+- 짧은 수명: `t`, `ctx`, `err`
+- 복잡한 타입은 설명적으로: `tableMetadata`, `namespaceList`
+
+## 에러 처리
+
+항상 에러를 명시적으로 처리하세요:
+
+```go
+// 좋음
+result, err := doSomething()
+if err != nil {
+ return fmt.Errorf("failed to do something: %w", err)
+}
+
+// 나쁨 - 에러 무시
+result, _ := doSomething()
+```
+
+## JSON 직렬화
+
+`encoding/json` 대신 `goccy/go-json`을 사용하세요:
+
+```go
+import "github.com/goccy/go-json"
+
+// Marshal
+data, err := json.Marshal(obj)
+
+// Unmarshal
+err := json.Unmarshal(data, &obj)
+```
+
+## Context 사용
+
+항상 context를 첫 번째 매개변수로 받으세요:
+
+```go
+func (h *Handler) GetTable(ctx context.Context, name string) (*Table, error) {
+ // ctx를 취소, 데드라인, 트레이싱에 사용
+}
+```
+
+## 복잡도 제한
+
+- 순환 복잡도: 최대 15
+- 함수 길이: 50줄 이하 권장
+- 파일 길이: 500줄 이하 권장
+
+## 문서화
+
+### 패키지 주석
+
+```go
+// Package handlers는 Iceberg REST API용 HTTP 핸들러를 제공합니다.
+// Apache Iceberg REST Catalog 스펙을 구현합니다.
+package handlers
+```
+
+### 함수 주석
+
+```go
+// CreateTable은 지정된 네임스페이스에 새 테이블을 생성합니다.
+// 저장하기 전에 테이블 스키마와 메타데이터를 검증합니다.
+// 생성된 테이블 메타데이터 또는 에러를 반환합니다.
+func (h *Handler) CreateTable(ctx context.Context, req CreateTableRequest) (*Table, error) {
+```
+
+## 커밋 메시지
+
+Conventional Commits를 따르세요:
+
+```text
+feat: add table compaction endpoint
+fix: correct metadata serialization for partitions
+docs: update API reference for views
+test: add integration tests for namespace operations
+refactor: extract validation logic to separate package
+```
diff --git a/docs-site/content/docs/ko/contributing/development.mdx b/docs-site/content/docs/ko/contributing/development.mdx
new file mode 100644
index 0000000..ba8d1f4
--- /dev/null
+++ b/docs-site/content/docs/ko/contributing/development.mdx
@@ -0,0 +1,138 @@
+---
+title: 개발 환경
+description: 개발 환경 설정
+---
+
+# 개발 환경
+
+Bingsan을 위한 로컬 개발 환경을 설정합니다.
+
+## 전제조건
+
+- Go 1.23+
+- Docker 및 Docker Compose
+- Make
+- PostgreSQL 클라이언트 (psql) - 선택사항
+
+## 빠른 설정
+
+```bash
+# 클론 및 디렉토리 진입
+git clone https://github.com/teamPaprika/bingsan.git
+cd bingsan
+
+# Go 의존성 설치
+go mod download
+
+# 개발 도구 설치
+make install-tools
+
+# 설정 복사
+cp config.example.yaml config.yaml
+
+# PostgreSQL 시작
+make docker-up
+
+# 핫 리로드와 함께 실행
+make dev
+```
+
+## 빌드 명령어
+
+```bash
+# 바이너리 빌드
+make build
+
+# 핫 리로드 없이 실행
+make run
+
+# 빌드 아티팩트 정리
+make clean
+```
+
+## 데이터베이스 관리
+
+```bash
+# 모든 마이그레이션 적용
+make migrate-up
+
+# 마이그레이션 하나 롤백
+make migrate-down
+
+# 새 마이그레이션 생성
+make migrate-create name=add_feature_table
+```
+
+## 핫 리로드
+
+프로젝트는 개발 중 핫 리로드를 위해 [air](https://github.com/cosmtrek/air)를 사용합니다:
+
+```bash
+make dev
+```
+
+Air는 파일 변경을 감시하고 자동으로 서버를 재빌드하고 재시작합니다.
+
+## IDE 설정
+
+### VS Code
+
+권장 확장 프로그램:
+- Go (golang.go)
+- EditorConfig
+- YAML
+
+설정:
+```json
+{
+ "go.lintTool": "golangci-lint",
+ "go.lintFlags": ["--fast"],
+ "editor.formatOnSave": true
+}
+```
+
+### GoLand
+
+- Go Modules 통합 활성화
+- golangci-lint를 외부 도구로 구성
+- gofumpt를 포매터로 설정
+
+## 환경 변수
+
+개발 시 환경 변수로 config.yaml을 오버라이드할 수 있습니다:
+
+```bash
+export ICEBERG_SERVER_PORT=8181
+export ICEBERG_DATABASE_HOST=localhost
+export ICEBERG_SERVER_DEBUG=true
+```
+
+## 디버깅
+
+### Delve 디버거
+
+```bash
+# delve 설치
+go install github.com/go-delve/delve/cmd/dlv@latest
+
+# 디버그
+dlv debug ./cmd/iceberg-catalog
+```
+
+### VS Code 실행 구성
+
+```json
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Launch Bingsan",
+ "type": "go",
+ "request": "launch",
+ "mode": "auto",
+ "program": "${workspaceFolder}/cmd/iceberg-catalog",
+ "args": ["-config", "config.yaml"]
+ }
+ ]
+}
+```
diff --git a/docs-site/content/docs/ko/contributing/index.mdx b/docs-site/content/docs/ko/contributing/index.mdx
new file mode 100644
index 0000000..0a6fc84
--- /dev/null
+++ b/docs-site/content/docs/ko/contributing/index.mdx
@@ -0,0 +1,93 @@
+---
+title: 기여하기
+description: Bingsan에 기여하는 방법
+---
+
+# Bingsan에 기여하기
+
+Bingsan에 관심을 가져주셔서 감사합니다! 이 가이드는 시작하는 데 도움이 될 것입니다.
+
+## 시작하기
+
+1. GitHub에서 저장소 포크
+2. 로컬에 포크 클론
+3. 개발 환경 설정
+4. 기능 브랜치 생성
+5. 변경사항 작성
+6. Pull Request 제출
+
+## 개발 환경 설정
+
+```bash
+# 저장소 클론
+git clone https://github.com/YOUR_USERNAME/bingsan.git
+cd bingsan
+
+# 의존성 설치
+go mod download
+
+# 개발 도구 설치
+make install-tools
+
+# 설정 복사
+cp config.example.yaml config.yaml
+
+# 의존성 시작
+make docker-up
+
+# 서버 실행
+make dev
+```
+
+## 프로젝트 구조
+
+```text
+bingsan/
+├── cmd/iceberg-catalog/ # 애플리케이션 진입점
+├── internal/
+│ ├── api/ # HTTP 핸들러 및 미들웨어
+│ ├── db/ # 데이터베이스 레이어 및 마이그레이션
+│ ├── events/ # 이벤트 스트리밍
+│ ├── pool/ # 오브젝트 풀링
+│ ├── config/ # 설정
+│ └── metrics/ # Prometheus 메트릭
+├── tests/ # 테스트 스위트
+├── deployments/ # Docker 및 K8s 설정
+└── docs/ # 문서
+```
+
+## 기여 유형
+
+### 버그 리포트
+
+- 먼저 기존 이슈 검색
+- 재현 단계 포함
+- 환경 세부 정보 제공
+- 관련 로그 첨부
+
+### 기능 요청
+
+- 로드맵과 기존 이슈 확인
+- 사용 사례 설명
+- 가능하면 솔루션 제안
+
+### 코드 기여
+
+- [코드 스타일 가이드](/docs/contributing/code-style) 준수
+- 새 기능에 대한 테스트 작성
+- 필요에 따라 문서 업데이트
+- 커밋을 집중적이고 잘 설명하도록 유지
+
+## Pull Request 프로세스
+
+1. 필요시 README 또는 문서 업데이트
+2. 모든 테스트 통과 확인
+3. 해당되는 경우 CHANGELOG 업데이트
+4. 메인테이너에게 리뷰 요청
+5. 리뷰 피드백에 신속하게 대응
+
+## 커뮤니케이션
+
+- 버그 및 기능에 대한 GitHub Issues
+- 질문을 위한 GitHub Discussions
+- 코드 리뷰를 위한 Pull Request 코멘트
diff --git a/docs-site/content/docs/ko/contributing/meta.json b/docs-site/content/docs/ko/contributing/meta.json
new file mode 100644
index 0000000..f12110a
--- /dev/null
+++ b/docs-site/content/docs/ko/contributing/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "기여하기",
+ "pages": ["index", "development", "testing", "code-style"]
+}
diff --git a/docs-site/content/docs/ko/contributing/testing.mdx b/docs-site/content/docs/ko/contributing/testing.mdx
new file mode 100644
index 0000000..88e2129
--- /dev/null
+++ b/docs-site/content/docs/ko/contributing/testing.mdx
@@ -0,0 +1,160 @@
+---
+title: 테스팅
+description: 테스트 실행 및 작성
+---
+
+# 테스팅
+
+Bingsan은 단위, 통합 및 벤치마크 테스트를 포함한 포괄적인 테스트 스위트를 갖추고 있습니다.
+
+## 테스트 실행
+
+```bash
+# 모든 테스트 실행
+make test
+
+# 상세 출력과 함께 실행
+go test -v ./...
+
+# 특정 패키지 실행
+go test -v ./internal/api/handlers/...
+
+# 특정 테스트 실행
+go test -v -run TestTableCreate ./tests/unit/...
+```
+
+## 테스트 구조
+
+```text
+tests/
+├── unit/ # 단위 테스트
+├── integration/ # 통합 테스트 (데이터베이스 필요)
+├── contract/ # API 계약 테스트
+├── e2e/ # 엔드투엔드 테스트
+├── benchmark/ # 성능 벤치마크
+└── fixtures/ # 테스트 데이터 및 SQL 픽스처
+```
+
+## 단위 테스트
+
+단위 테스트는 코드와 함께 또는 `tests/unit/`에 위치합니다:
+
+```bash
+# 단위 테스트만 실행
+go test -v ./internal/...
+```
+
+## 통합 테스트
+
+통합 테스트는 실행 중인 PostgreSQL 인스턴스가 필요합니다:
+
+```bash
+# 의존성 시작
+make docker-up
+
+# 통합 테스트 실행
+make test-integration
+```
+
+통합 테스트는 `integration` 빌드 태그를 사용합니다:
+
+```go
+//go:build integration
+
+package integration
+
+func TestDatabaseOperations(t *testing.T) {
+ // ...
+}
+```
+
+## 계약 테스트
+
+계약 테스트는 Iceberg REST 스펙과의 API 준수를 검증합니다:
+
+```bash
+go test -v ./tests/contract/...
+```
+
+## 테스트 작성
+
+### 테스트 네이밍
+
+```go
+func TestFeature_Scenario_ExpectedBehavior(t *testing.T) {
+ // 예: TestTableCreate_WithValidInput_ReturnsCreatedTable
+}
+```
+
+### 테이블 기반 테스트
+
+```go
+func TestValidation(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ wantErr bool
+ }{
+ {"valid input", "test", false},
+ {"empty input", "", true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := Validate(tt.input)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+```
+
+### 테스트 픽스처
+
+데이터베이스 테스트에 SQL 픽스처 사용:
+
+```bash
+tests/fixtures/
+├── namespaces.sql
+├── tables.sql
+└── cleanup.sql
+```
+
+## 벤치마크
+
+```bash
+# 모든 벤치마크 실행
+make bench
+
+# 특정 벤치마크 실행
+go test -bench=BenchmarkTable -benchmem ./tests/benchmark/...
+
+# 전후 비교
+go test -bench=. ./tests/benchmark/... | tee before.txt
+# 변경 후
+go test -bench=. ./tests/benchmark/... | tee after.txt
+benchstat before.txt after.txt
+```
+
+## 커버리지
+
+```bash
+# 커버리지 리포트 생성
+go test -coverprofile=coverage.out ./...
+
+# 브라우저에서 보기
+go tool cover -html=coverage.out
+
+# 커버리지 퍼센트 확인
+go tool cover -func=coverage.out
+```
+
+## CI 통합
+
+테스트는 GitHub Actions를 통해 Pull Request에서 자동으로 실행됩니다. CI 파이프라인:
+
+1. 린터 실행 (golangci-lint)
+2. 단위 테스트 실행
+3. PostgreSQL 서비스와 함께 통합 테스트 실행
+4. 커버리지 리포트
diff --git a/docs-site/content/docs/ko/deployment/docker.mdx b/docs-site/content/docs/ko/deployment/docker.mdx
new file mode 100644
index 0000000..2230a86
--- /dev/null
+++ b/docs-site/content/docs/ko/deployment/docker.mdx
@@ -0,0 +1,155 @@
+---
+title: Docker
+description: Docker 또는 Docker Compose를 사용한 Bingsan 배포
+---
+
+# Docker 배포
+
+Docker 또는 Docker Compose를 사용하여 Bingsan을 배포합니다.
+
+## Docker Compose (개발용)
+
+개발 및 테스트를 위한 가장 빠른 시작 방법입니다.
+
+### 전제조건
+
+- Docker Engine 20.10+
+- Docker Compose v2.0+
+
+### 빠른 시작
+
+```bash
+# 저장소 클론
+git clone https://github.com/teamPaprika/bingsan.git
+cd bingsan
+
+# 설정 복사
+cp config.example.yaml config.yaml
+
+# 서비스 시작
+docker compose -f deployments/docker/docker-compose.yml up -d
+```
+
+### docker-compose.yml
+
+```yaml
+version: '3.8'
+
+services:
+ bingsan:
+    image: ghcr.io/teampaprika/bingsan:latest
+ ports:
+ - "8181:8181"
+ environment:
+ - ICEBERG_SERVER_HOST=0.0.0.0
+ - ICEBERG_SERVER_PORT=8181
+ - ICEBERG_DATABASE_HOST=postgres
+ - ICEBERG_DATABASE_PORT=5432
+ - ICEBERG_DATABASE_USER=iceberg
+ - ICEBERG_DATABASE_PASSWORD=iceberg
+ - ICEBERG_DATABASE_DATABASE=iceberg_catalog
+ - ICEBERG_STORAGE_TYPE=local
+ - ICEBERG_STORAGE_WAREHOUSE=file:///data/warehouse
+ volumes:
+ - warehouse-data:/data/warehouse
+ depends_on:
+ postgres:
+ condition: service_healthy
+
+ postgres:
+ image: postgres:15-alpine
+ environment:
+ - POSTGRES_USER=iceberg
+ - POSTGRES_PASSWORD=iceberg
+ - POSTGRES_DB=iceberg_catalog
+ ports:
+ - "5432:5432"
+ volumes:
+ - postgres-data:/var/lib/postgresql/data
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U iceberg -d iceberg_catalog"]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+
+volumes:
+ warehouse-data:
+ postgres-data:
+```
+
+### 스택 관리
+
+```bash
+# 로그 보기
+docker compose -f deployments/docker/docker-compose.yml logs -f
+
+# 서비스 중지
+docker compose -f deployments/docker/docker-compose.yml down
+
+# 중지 및 볼륨 제거
+docker compose -f deployments/docker/docker-compose.yml down -v
+```
+
+## S3와 함께 Docker Compose (MinIO)
+
+S3 호환 스토리지 테스트를 위해 compose 파일에 MinIO를 추가하고 S3 스토리지 설정에 적절한 환경 변수를 구성합니다.
+
+## 독립 실행형 Docker
+
+독립 컨테이너로 Bingsan을 실행합니다 (외부 PostgreSQL 필요).
+
+```bash
+# 이미지 풀
+docker pull ghcr.io/teampaprika/bingsan:latest
+
+# 환경 변수와 함께 실행
+docker run -d \
+ --name bingsan \
+ -p 8181:8181 \
+ -e ICEBERG_DATABASE_HOST=your-postgres-host \
+ -e ICEBERG_DATABASE_PORT=5432 \
+ -e ICEBERG_DATABASE_USER=iceberg \
+ -e ICEBERG_DATABASE_PASSWORD=your-password \
+ -e ICEBERG_DATABASE_DATABASE=iceberg_catalog \
+ -e ICEBERG_STORAGE_TYPE=s3 \
+ -e ICEBERG_STORAGE_WAREHOUSE=s3://your-bucket/warehouse \
+ -e ICEBERG_STORAGE_S3_REGION=us-east-1 \
+  ghcr.io/teampaprika/bingsan:latest
+```
+
+## 프로덕션 권장사항
+
+1. **특정 이미지 태그 사용**: 프로덕션에서 `latest` 사용 금지
+2. **리소스 제한**: 메모리 및 CPU 제한 설정
+3. **헬스 체크**: 컨테이너 헬스 체크 구성
+4. **로깅**: json-file 또는 외부 로깅 드라이버 사용
+5. **시크릿**: Docker secrets 또는 외부 시크릿 관리 사용
+
+## 트러블슈팅
+
+### 컨테이너 시작 안됨
+
+```bash
+# 로그 확인
+docker logs bingsan
+
+# 포트 사용 중인지 확인
+lsof -i :8181
+```
+
+### 데이터베이스 연결 실패
+
+```bash
+# PostgreSQL 연결 테스트
+docker exec bingsan nc -zv postgres 5432
+
+# 환경 변수 확인
+docker exec bingsan env | grep ICEBERG_DATABASE
+```
+
+### 헬스 체크 실패
+
+```bash
+# 헬스 엔드포인트 테스트
+docker exec bingsan wget -q -O- http://localhost:8181/health
+```
diff --git a/docs-site/content/docs/ko/deployment/index.mdx b/docs-site/content/docs/ko/deployment/index.mdx
new file mode 100644
index 0000000..20a5872
--- /dev/null
+++ b/docs-site/content/docs/ko/deployment/index.mdx
@@ -0,0 +1,57 @@
+---
+title: 배포
+description: 다양한 환경에서 Bingsan 배포
+---
+
+# 배포
+
+이 섹션에서는 다양한 환경에서 Bingsan을 배포하는 방법을 다룹니다.
+
+## 배포 옵션
+
+### 개발 환경
+
+- [Docker Compose](/docs/deployment/docker) - 빠른 로컬 설정
+
+### 프로덕션
+
+- [Kubernetes](/docs/deployment/kubernetes) - 확장 가능한 클라우드 배포
+- [Docker](/docs/deployment/docker) - 단일 노드 프로덕션
+
+## 빠른 비교
+
+| 기능 | Docker Compose | Docker | Kubernetes |
+|------|---------------|--------|------------|
+| 복잡도 | 낮음 | 낮음 | 높음 |
+| 확장성 | 단일 노드 | 단일 노드 | 다중 노드 |
+| HA | 아니오 | 아니오 | 예 |
+| 모니터링 | 수동 | 수동 | 통합 |
+| 적합 대상 | 개발 | 소규모 프로덕션 | 대규모 프로덕션 |
+
+## 전제조건
+
+### 모든 배포
+
+- PostgreSQL 15+ 데이터베이스
+- 프로덕션용 오브젝트 스토리지 (S3/GCS)
+
+### Docker
+
+- Docker Engine 20.10+
+- Docker Compose v2.0+ (Compose 배포용)
+
+### Kubernetes
+
+- Kubernetes 1.24+
+- kubectl 구성 완료
+- Helm 3.x (선택사항)
+
+## 공통 설정
+
+배포 방법에 관계없이 다음을 구성해야 합니다:
+
+1. **데이터베이스**: PostgreSQL 연결 정보
+2. **스토리지**: S3/GCS 자격 증명 및 버킷
+3. **인증** (선택사항): OAuth/API 키 설정
+
+모든 옵션은 [설정](/docs/configuration)을 참조하세요.
diff --git a/docs-site/content/docs/ko/deployment/kubernetes.mdx b/docs-site/content/docs/ko/deployment/kubernetes.mdx
new file mode 100644
index 0000000..37a0fd3
--- /dev/null
+++ b/docs-site/content/docs/ko/deployment/kubernetes.mdx
@@ -0,0 +1,225 @@
+---
+title: Kubernetes
+description: 프로덕션을 위한 Kubernetes 배포
+---
+
+# Kubernetes 배포
+
+프로덕션 워크로드를 위해 Kubernetes에 Bingsan을 배포합니다.
+
+## 전제조건
+
+- Kubernetes 1.24+
+- kubectl 구성 완료
+- PostgreSQL 데이터베이스 (관리형 또는 자체 호스팅)
+- 오브젝트 스토리지 (S3/GCS)
+
+## 빠른 시작
+
+### 1. 네임스페이스 생성
+
+```bash
+kubectl create namespace bingsan
+```
+
+### 2. Secrets 생성
+
+```bash
+# 데이터베이스 자격 증명
+kubectl create secret generic bingsan-db \
+ --namespace bingsan \
+ --from-literal=host=postgres.example.com \
+ --from-literal=port=5432 \
+ --from-literal=user=iceberg \
+ --from-literal=password=your-password \
+ --from-literal=database=iceberg_catalog
+
+# S3 자격 증명 (정적 자격 증명 사용 시)
+kubectl create secret generic bingsan-s3 \
+ --namespace bingsan \
+ --from-literal=access-key-id=AKIA... \
+ --from-literal=secret-access-key=...
+```
+
+### 3. ConfigMap 생성
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: bingsan-config
+ namespace: bingsan
+data:
+ config.yaml: |
+ server:
+ host: 0.0.0.0
+ port: 8181
+ debug: false
+
+ storage:
+ type: s3
+ warehouse: s3://your-bucket/warehouse
+ s3:
+ region: us-east-1
+ bucket: your-bucket
+
+ catalog:
+ lock_timeout: 30s
+```
+
+### 4. 배포
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: bingsan
+ template:
+ metadata:
+ labels:
+ app: bingsan
+ spec:
+ containers:
+ - name: bingsan
+        image: ghcr.io/teampaprika/bingsan:latest
+ ports:
+ - containerPort: 8181
+ name: http
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "1000m"
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: http
+ initialDelaySeconds: 5
+ periodSeconds: 5
+```
+
+## Service
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ type: ClusterIP
+ selector:
+ app: bingsan
+ ports:
+ - port: 8181
+ targetPort: http
+ name: http
+```
+
+## HorizontalPodAutoscaler
+
+```yaml
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: bingsan
+ minReplicas: 3
+ maxReplicas: 20
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 70
+```
+
+## PodDisruptionBudget
+
+```yaml
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ minAvailable: 2
+ selector:
+ matchLabels:
+ app: bingsan
+```
+
+## 클라우드 제공자 통합
+
+### AWS IAM Roles for Service Accounts (IRSA)
+
+정적 자격 증명 없이 AWS S3에 액세스하려면 IAM roles for service accounts를 사용하세요.
+
+### GCP Workload Identity
+
+GCS 액세스를 위해 GKE 클러스터에서 Workload Identity를 구성하세요.
+
+## 모니터링
+
+### ServiceMonitor (Prometheus Operator)
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: bingsan
+ namespace: bingsan
+spec:
+ selector:
+ matchLabels:
+ app: bingsan
+ endpoints:
+ - port: http
+ path: /metrics
+ interval: 15s
+```
+
+## 트러블슈팅
+
+### Pod 상태 확인
+
+```bash
+kubectl get pods -n bingsan
+kubectl describe pod -n bingsan bingsan-xxx
+```
+
+### 로그 보기
+
+```bash
+kubectl logs -n bingsan -l app=bingsan --tail=100 -f
+```
+
+### 연결 테스트
+
+```bash
+# 로컬 테스트를 위한 포트 포워딩
+kubectl port-forward -n bingsan svc/bingsan 8181:8181
+
+# 헬스 테스트
+curl http://localhost:8181/health
+```
diff --git a/docs-site/content/docs/ko/deployment/meta.json b/docs-site/content/docs/ko/deployment/meta.json
new file mode 100644
index 0000000..0ab45e0
--- /dev/null
+++ b/docs-site/content/docs/ko/deployment/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "배포",
+ "pages": ["index", "docker", "kubernetes"]
+}
diff --git a/docs-site/content/docs/ko/getting-started/first-steps.mdx b/docs-site/content/docs/ko/getting-started/first-steps.mdx
new file mode 100644
index 0000000..546dc49
--- /dev/null
+++ b/docs-site/content/docs/ko/getting-started/first-steps.mdx
@@ -0,0 +1,230 @@
+---
+title: 첫 번째 단계
+description: Bingsan에서 첫 번째 네임스페이스와 테이블 생성하기
+---
+
+# 첫 번째 단계
+
+Bingsan을 설치한 후 이 가이드를 따라 첫 번째 네임스페이스와 테이블을 생성해보세요.
+
+## 계층 구조 이해하기
+
+Iceberg 카탈로그는 계층 구조를 사용합니다:
+
+```
+Catalog (Bingsan)
+└── Namespace (예: "analytics", "raw.events")
+ └── Table (예: "user_events")
+ └── Metadata (스키마, 파티션, 스냅샷)
+```
+
+## 네임스페이스 생성
+
+네임스페이스는 테이블의 컨테이너입니다. 점(.) 또는 배열을 사용하여 중첩할 수 있습니다.
+
+```bash
+# 간단한 네임스페이스 생성
+curl -X POST http://localhost:8181/v1/namespaces \
+ -H "Content-Type: application/json" \
+ -d '{
+ "namespace": ["analytics"],
+ "properties": {
+ "owner": "data-team",
+ "description": "Analytics data warehouse"
+ }
+ }'
+```
+
+응답:
+```json
+{
+ "namespace": ["analytics"],
+ "properties": {
+ "owner": "data-team",
+ "description": "Analytics data warehouse"
+ }
+}
+```
+
+### 중첩 네임스페이스
+
+중첩 네임스페이스 생성:
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces \
+ -H "Content-Type: application/json" \
+ -d '{
+ "namespace": ["analytics", "events"],
+ "properties": {}
+ }'
+```
+
+## 네임스페이스 목록 조회
+
+```bash
+# 모든 네임스페이스 조회
+curl http://localhost:8181/v1/namespaces
+```
+
+응답:
+```json
+{
+ "namespaces": [
+ ["analytics"],
+ ["analytics", "events"]
+ ]
+}
+```
+
+### 상위 네임스페이스로 필터링
+
+```bash
+# 하위 네임스페이스 조회
+curl "http://localhost:8181/v1/namespaces?parent=analytics"
+```
+
+## 네임스페이스 상세 조회
+
+```bash
+curl http://localhost:8181/v1/namespaces/analytics
+```
+
+응답:
+```json
+{
+ "namespace": ["analytics"],
+ "properties": {
+ "owner": "data-team",
+ "description": "Analytics data warehouse"
+ }
+}
+```
+
+## 테이블 생성
+
+테이블은 Iceberg 사양을 따르는 스키마 정의가 필요합니다.
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces/analytics/tables \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "user_events",
+ "schema": {
+ "type": "struct",
+ "schema-id": 0,
+ "fields": [
+ {"id": 1, "name": "event_id", "required": true, "type": "string"},
+ {"id": 2, "name": "user_id", "required": true, "type": "long"},
+ {"id": 3, "name": "event_type", "required": true, "type": "string"},
+ {"id": 4, "name": "event_time", "required": true, "type": "timestamptz"},
+ {"id": 5, "name": "properties", "required": false, "type": {"type": "map", "key-id": 6, "key": "string", "value-id": 7, "value": "string"}}
+ ]
+ },
+ "partition-spec": {
+ "spec-id": 0,
+ "fields": [
+ {"source-id": 4, "field-id": 1000, "name": "event_day", "transform": "day"}
+ ]
+ },
+ "properties": {
+ "format-version": "2",
+ "write.parquet.compression-codec": "zstd"
+ }
+ }'
+```
+
+## 테이블 목록 조회
+
+```bash
+curl http://localhost:8181/v1/namespaces/analytics/tables
+```
+
+응답:
+```json
+{
+ "identifiers": [
+ {"namespace": ["analytics"], "name": "user_events"}
+ ]
+}
+```
+
+## 테이블 메타데이터 로드
+
+```bash
+curl http://localhost:8181/v1/namespaces/analytics/tables/user_events
+```
+
+이 API는 다음을 포함한 전체 테이블 메타데이터를 반환합니다:
+- 현재 스키마
+- 파티션 사양
+- 정렬 순서
+- 현재 스냅샷
+- 테이블 속성
+
+## 테이블 존재 확인
+
+가벼운 존재 확인을 위해 HEAD 요청을 사용합니다:
+
+```bash
+curl -I http://localhost:8181/v1/namespaces/analytics/tables/user_events
+```
+
+존재하면 HTTP 200, 존재하지 않으면 404를 반환합니다.
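+
+같은 확인을 Go 클라이언트에서 수행하면 아래와 같습니다. URL은 로컬 환경을 가정한 예시입니다.
+
+```go
+// HEAD 요청으로 테이블 존재 여부를 확인하는 예시입니다.
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+)
+
+func main() {
+	url := "http://localhost:8181/v1/namespaces/analytics/tables/user_events"
+
+	resp, err := http.Head(url)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		fmt.Println("table exists")
+	case http.StatusNotFound:
+		fmt.Println("table not found")
+	default:
+		fmt.Println("unexpected status:", resp.Status)
+	}
+}
+```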
+
+## 네임스페이스 속성 업데이트
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces/analytics/properties \
+ -H "Content-Type: application/json" \
+ -d '{
+ "updates": {
+ "owner": "platform-team"
+ },
+ "removals": ["description"]
+ }'
+```
+
+## 리소스 삭제
+
+### 테이블 삭제
+
+```bash
+curl -X DELETE http://localhost:8181/v1/namespaces/analytics/tables/user_events
+```
+
+### 네임스페이스 삭제
+
+
+네임스페이스는 비어 있어야 삭제할 수 있습니다.
+
+
+```bash
+curl -X DELETE http://localhost:8181/v1/namespaces/analytics
+```
+
+## Spark와 함께 사용하기
+
+카탈로그가 설정되면 Spark에서 연결합니다:
+
+```python
+from pyspark.sql import SparkSession
+
+spark = SparkSession.builder \
+ .appName("Bingsan Example") \
+ .config("spark.sql.catalog.bingsan", "org.apache.iceberg.spark.SparkCatalog") \
+ .config("spark.sql.catalog.bingsan.type", "rest") \
+ .config("spark.sql.catalog.bingsan.uri", "http://localhost:8181") \
+ .getOrCreate()
+
+# 테이블 목록
+spark.sql("SHOW TABLES IN bingsan.analytics").show()
+
+# 테이블 쿼리
+spark.sql("SELECT * FROM bingsan.analytics.user_events LIMIT 10").show()
+```
+
+## 다음 단계
+
+- [모든 API 엔드포인트 살펴보기](/docs/api)
+- [인증 설정](/docs/configuration/auth)
+- [모니터링 설정](/docs/performance/metrics)
diff --git a/docs-site/content/docs/ko/getting-started/index.mdx b/docs-site/content/docs/ko/getting-started/index.mdx
new file mode 100644
index 0000000..edf54d5
--- /dev/null
+++ b/docs-site/content/docs/ko/getting-started/index.mdx
@@ -0,0 +1,22 @@
+---
+title: 시작하기
+description: 고성능 Apache Iceberg REST Catalog인 Bingsan을 시작해보세요
+---
+
+# 시작하기
+
+이 섹션에서는 Bingsan을 설치하고 실행하는 데 필요한 모든 내용을 다룹니다.
+
+## 사전 요구사항
+
+Bingsan을 설치하기 전에 다음 항목이 준비되어 있는지 확인하세요:
+
+- **Go 1.25+** (소스에서 빌드하는 경우)
+- **PostgreSQL 15+** (메타데이터 저장소)
+- **Docker & Docker Compose** (개발 환경에 권장)
+
+## 섹션
+
+- [빠른 시작](/docs/getting-started/quick-start) - 5분 만에 실행하기
+- [설치](/docs/getting-started/installation) - 상세 설치 옵션
+- [첫 번째 단계](/docs/getting-started/first-steps) - 첫 번째 네임스페이스와 테이블 생성하기
diff --git a/docs-site/content/docs/ko/getting-started/installation.mdx b/docs-site/content/docs/ko/getting-started/installation.mdx
new file mode 100644
index 0000000..7dbf4a6
--- /dev/null
+++ b/docs-site/content/docs/ko/getting-started/installation.mdx
@@ -0,0 +1,160 @@
+---
+title: 설치
+description: Bingsan의 모든 설치 방법 완벽 가이드
+---
+
+# 설치
+
+이 가이드에서는 Bingsan의 모든 설치 방법을 다룹니다.
+
+## 방법 1: Docker Compose (권장)
+
+개발 및 테스트를 위한 가장 쉬운 방법입니다.
+
+### 사전 요구사항
+
+- Docker Engine 20.10+
+- Docker Compose v2.0+
+
+### 설치 단계
+
+```bash
+# 저장소 클론
+git clone https://github.com/teamPaprika/bingsan.git
+cd bingsan
+
+# 설정 복사
+cp config.example.yaml config.yaml
+
+# 서비스 시작
+docker compose -f deployments/docker/docker-compose.yml up -d
+```
+
+## 방법 2: 소스에서 빌드
+
+Bingsan을 직접 빌드하여 실행합니다.
+
+### 사전 요구사항
+
+- Go 1.25 이상
+- PostgreSQL 15+ (실행 중이고 접근 가능해야 함)
+
+### 설치 단계
+
+```bash
+# 저장소 클론
+git clone https://github.com/teamPaprika/bingsan.git
+cd bingsan
+
+# 의존성 다운로드
+go mod download
+
+# 바이너리 빌드
+go build -o bin/iceberg-catalog ./cmd/iceberg-catalog
+
+# 설정
+cp config.example.yaml config.yaml
+# PostgreSQL 연결 정보로 config.yaml 편집
+
+# 실행
+./bin/iceberg-catalog
+```
+
+### 빌드 옵션
+
+버전 정보와 함께 빌드:
+
+```bash
+go build -ldflags "-X main.version=1.0.0 -X main.commit=$(git rev-parse HEAD)" \
+ -o bin/iceberg-catalog ./cmd/iceberg-catalog
+```
+
+다른 플랫폼용 빌드:
+
+```bash
+# Linux AMD64
+GOOS=linux GOARCH=amd64 go build -o bin/iceberg-catalog-linux-amd64 ./cmd/iceberg-catalog
+
+# Linux ARM64
+GOOS=linux GOARCH=arm64 go build -o bin/iceberg-catalog-linux-arm64 ./cmd/iceberg-catalog
+
+# macOS AMD64
+GOOS=darwin GOARCH=amd64 go build -o bin/iceberg-catalog-darwin-amd64 ./cmd/iceberg-catalog
+
+# macOS ARM64 (Apple Silicon)
+GOOS=darwin GOARCH=arm64 go build -o bin/iceberg-catalog-darwin-arm64 ./cmd/iceberg-catalog
+```
+
+## 방법 3: Docker 이미지
+
+사전 빌드된 Docker 이미지를 풀하여 실행합니다.
+
+```bash
+# 이미지 풀
+docker pull ghcr.io/teampaprika/bingsan:latest
+
+# 환경 변수와 함께 실행
+docker run -d \
+ --name bingsan \
+ -p 8181:8181 \
+ -e ICEBERG_DATABASE_HOST=postgres \
+ -e ICEBERG_DATABASE_PORT=5432 \
+ -e ICEBERG_DATABASE_USER=iceberg \
+ -e ICEBERG_DATABASE_PASSWORD=iceberg \
+ -e ICEBERG_DATABASE_DATABASE=iceberg_catalog \
+  ghcr.io/teampaprika/bingsan:latest
+```
+
+## 방법 4: Kubernetes
+
+Helm 또는 raw 매니페스트를 사용하여 Kubernetes에 배포합니다.
+
+자세한 내용은 [Kubernetes 배포 가이드](/docs/deployment/kubernetes)를 참조하세요.
+
+## 데이터베이스 설정
+
+Bingsan은 PostgreSQL 15 이상이 필요합니다.
+
+### 데이터베이스 생성
+
+```sql
+CREATE DATABASE iceberg_catalog;
+CREATE USER iceberg WITH PASSWORD 'iceberg';
+GRANT ALL PRIVILEGES ON DATABASE iceberg_catalog TO iceberg;
+```
+
+### 자동 마이그레이션
+
+Bingsan은 시작 시 자동으로 데이터베이스 마이그레이션을 실행합니다. 수동 스키마 설정이 필요하지 않습니다.
+
+## 설치 확인
+
+Bingsan을 시작한 후 올바르게 실행되고 있는지 확인합니다:
+
+```bash
+# 헬스 체크
+curl http://localhost:8181/health
+
+# 준비 상태 체크 (DB 포함)
+curl http://localhost:8181/ready
+
+# 설정 조회
+curl http://localhost:8181/v1/config
+```
+
+## 환경 변수
+
+모든 설정 옵션은 `ICEBERG_` 접두사를 사용하여 환경 변수로 설정할 수 있습니다:
+
+| 환경 변수 | 설정 경로 | 설명 |
+|---------------------|-------------|-------------|
+| `ICEBERG_SERVER_HOST` | `server.host` | 바인드 주소 |
+| `ICEBERG_SERVER_PORT` | `server.port` | 리슨 포트 |
+| `ICEBERG_DATABASE_HOST` | `database.host` | PostgreSQL 호스트 |
+| `ICEBERG_DATABASE_PORT` | `database.port` | PostgreSQL 포트 |
+| `ICEBERG_DATABASE_USER` | `database.user` | 데이터베이스 사용자 |
+| `ICEBERG_DATABASE_PASSWORD` | `database.password` | 데이터베이스 비밀번호 |
+| `ICEBERG_DATABASE_DATABASE` | `database.database` | 데이터베이스 이름 |
+| `ICEBERG_AUTH_ENABLED` | `auth.enabled` | 인증 활성화 |
+
+모든 옵션은 [설정 가이드](/docs/configuration)를 참조하세요.
diff --git a/docs-site/content/docs/ko/getting-started/meta.json b/docs-site/content/docs/ko/getting-started/meta.json
new file mode 100644
index 0000000..5179a1a
--- /dev/null
+++ b/docs-site/content/docs/ko/getting-started/meta.json
@@ -0,0 +1,9 @@
+{
+ "title": "시작하기",
+ "pages": [
+ "index",
+ "quick-start",
+ "installation",
+ "first-steps"
+ ]
+}
diff --git a/docs-site/content/docs/ko/getting-started/quick-start.mdx b/docs-site/content/docs/ko/getting-started/quick-start.mdx
new file mode 100644
index 0000000..3d6f1b6
--- /dev/null
+++ b/docs-site/content/docs/ko/getting-started/quick-start.mdx
@@ -0,0 +1,110 @@
+---
+title: 빠른 시작
+description: Docker Compose를 사용하여 5분 만에 Bingsan 실행하기
+---
+
+# 빠른 시작
+
+Docker Compose를 사용하여 5분 만에 Bingsan을 실행해보세요.
+
+## 사전 요구사항
+
+- Docker Engine 20.10+
+- Docker Compose v2.0+
+
+## Step 1: 저장소 클론
+
+```bash
+git clone https://github.com/teamPaprika/bingsan.git
+cd bingsan
+```
+
+## Step 2: 설정
+
+예제 설정 파일을 복사합니다:
+
+```bash
+cp config.example.yaml config.yaml
+```
+
+기본 설정은 로컬 개발 환경에서 바로 사용할 수 있습니다. 프로덕션 환경의 경우 [설정 가이드](/docs/configuration)를 참조하세요.
+
+## Step 3: Docker Compose로 시작
+
+```bash
+docker compose -f deployments/docker/docker-compose.yml up -d
+```
+
+다음 서비스가 시작됩니다:
+- **Bingsan** - 포트 8181의 REST catalog 서버
+- **PostgreSQL** - 포트 5432의 메타데이터 데이터베이스
+
+## Step 4: 설치 확인
+
+헬스 엔드포인트를 확인합니다:
+
+```bash
+curl http://localhost:8181/health
+```
+
+예상 응답:
+```json
+{"status": "ok"}
+```
+
+준비 상태 확인 (데이터베이스 연결 포함):
+
+```bash
+curl http://localhost:8181/ready
+```
+
+예상 응답:
+```json
+{"status": "ready", "database": "connected"}
+```
+
+## Step 5: 첫 번째 네임스페이스 생성
+
+```bash
+curl -X POST http://localhost:8181/v1/namespaces \
+ -H "Content-Type: application/json" \
+ -d '{
+ "namespace": ["analytics"],
+ "properties": {
+ "owner": "data-team"
+ }
+ }'
+```
+
+## Step 6: 네임스페이스 목록 조회
+
+```bash
+curl http://localhost:8181/v1/namespaces
+```
+
+응답:
+```json
+{
+ "namespaces": [
+ ["analytics"]
+ ]
+}
+```
+
+## 다음 단계
+
+- [첫 번째 테이블 생성하기](/docs/getting-started/first-steps)
+- [모든 API 엔드포인트 알아보기](/docs/api)
+- [프로덕션 설정](/docs/configuration)
+
+## 서비스 중지
+
+```bash
+docker compose -f deployments/docker/docker-compose.yml down
+```
+
+데이터 볼륨도 함께 삭제하려면:
+
+```bash
+docker compose -f deployments/docker/docker-compose.yml down -v
+```
diff --git a/docs-site/content/docs/ko/integrations/index.mdx b/docs-site/content/docs/ko/integrations/index.mdx
new file mode 100644
index 0000000..410fcb9
--- /dev/null
+++ b/docs-site/content/docs/ko/integrations/index.mdx
@@ -0,0 +1,23 @@
+---
+title: 연동
+description: Spark, Trino, PyIceberg 등과 Bingsan 연결하기
+---
+
+# 연동
+
+Bingsan은 표준 REST Catalog API를 통해 모든 주요 Iceberg 클라이언트와 연동됩니다.
+
+## 지원 클라이언트
+
+- [Apache Spark](/docs/integrations/spark) - 배치 및 스트리밍 데이터 처리
+- [Trino](/docs/integrations/trino) - 분산 SQL 쿼리 엔진
+- [PyIceberg](/docs/integrations/pyiceberg) - Iceberg용 Python 라이브러리
+
+## 연결 개요
+
+모든 클라이언트는 REST catalog 타입을 사용하여 연결합니다:
+
+```
+Catalog URI: http://localhost:8181
+Catalog Type: rest
+```
diff --git a/docs-site/content/docs/ko/integrations/meta.json b/docs-site/content/docs/ko/integrations/meta.json
new file mode 100644
index 0000000..f2dbcf2
--- /dev/null
+++ b/docs-site/content/docs/ko/integrations/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "연동",
+ "pages": ["index", "spark", "trino", "pyiceberg"]
+}
diff --git a/docs-site/content/docs/ko/integrations/pyiceberg.mdx b/docs-site/content/docs/ko/integrations/pyiceberg.mdx
new file mode 100644
index 0000000..3a399c2
--- /dev/null
+++ b/docs-site/content/docs/ko/integrations/pyiceberg.mdx
@@ -0,0 +1,51 @@
+---
+title: PyIceberg
+description: Bingsan과 함께 PyIceberg Python 라이브러리 사용하기
+---
+
+# PyIceberg 연동
+
+Python에서 PyIceberg를 사용하여 Bingsan과 상호작용할 수 있습니다.
+
+## 설치
+
+```bash
+pip install pyiceberg
+```
+
+## 연결
+
+```python
+from pyiceberg.catalog import load_catalog
+
+catalog = load_catalog(
+ "rest",
+ uri="http://localhost:8181"
+)
+```
+
+## 인증 사용
+
+```python
+catalog = load_catalog(
+ "rest",
+ uri="http://localhost:8181",
+ credential="client_id:client_secret"
+)
+```
+
+## 사용법
+
+```python
+# 네임스페이스 목록
+catalog.list_namespaces()
+
+# 테이블 목록
+catalog.list_tables("analytics")
+
+# 테이블 로드
+table = catalog.load_table("analytics.user_events")
+
+# 데이터 읽기
+df = table.scan().to_pandas()
+```
diff --git a/docs-site/content/docs/ko/integrations/spark.mdx b/docs-site/content/docs/ko/integrations/spark.mdx
new file mode 100644
index 0000000..1e97803
--- /dev/null
+++ b/docs-site/content/docs/ko/integrations/spark.mdx
@@ -0,0 +1,45 @@
+---
+title: Apache Spark
+description: Apache Spark를 Bingsan REST Catalog에 연결하기
+---
+
+# Apache Spark 연동
+
+Apache Spark를 Bingsan에 연결하여 Iceberg 테이블을 읽고 쓸 수 있습니다.
+
+## 설정
+
+```python
+from pyspark.sql import SparkSession
+
+spark = SparkSession.builder \
+ .appName("Bingsan Example") \
+ .config("spark.sql.catalog.bingsan", "org.apache.iceberg.spark.SparkCatalog") \
+ .config("spark.sql.catalog.bingsan.type", "rest") \
+ .config("spark.sql.catalog.bingsan.uri", "http://localhost:8181") \
+ .getOrCreate()
+```
+
+## 인증 사용
+
+```python
+spark = SparkSession.builder \
+ .config("spark.sql.catalog.bingsan.credential", "client_id:client_secret") \
+ .getOrCreate()
+```
+
+## 사용법
+
+```sql
+-- 테이블 목록
+SHOW TABLES IN bingsan.analytics;
+
+-- 테이블 쿼리
+SELECT * FROM bingsan.analytics.user_events LIMIT 10;
+
+-- 테이블 생성
+CREATE TABLE bingsan.analytics.events (
+ id BIGINT,
+ data STRING
+) USING iceberg;
+```
diff --git a/docs-site/content/docs/ko/integrations/trino.mdx b/docs-site/content/docs/ko/integrations/trino.mdx
new file mode 100644
index 0000000..8675869
--- /dev/null
+++ b/docs-site/content/docs/ko/integrations/trino.mdx
@@ -0,0 +1,39 @@
+---
+title: Trino
+description: Trino를 Bingsan REST Catalog에 연결하기
+---
+
+# Trino 연동
+
+Trino를 Bingsan에 연결하여 Iceberg 테이블에 대한 분산 SQL 쿼리를 실행할 수 있습니다.
+
+## 커넥터 설정
+
+`etc/catalog/iceberg.properties` 파일을 생성합니다:
+
+```properties
+connector.name=iceberg
+iceberg.catalog.type=rest
+iceberg.rest-catalog.uri=http://bingsan:8181
+```
+
+## 인증 사용
+
+```properties
+connector.name=iceberg
+iceberg.catalog.type=rest
+iceberg.rest-catalog.uri=http://bingsan:8181
+iceberg.rest-catalog.security=OAUTH2
+iceberg.rest-catalog.oauth2.client-id=client_id
+iceberg.rest-catalog.oauth2.client-secret=client_secret
+```
+
+## 사용법
+
+```sql
+-- 스키마 목록
+SHOW SCHEMAS FROM iceberg;
+
+-- 테이블 쿼리
+SELECT * FROM iceberg.analytics.user_events LIMIT 10;
+```
diff --git a/docs-site/content/docs/ko/performance/benchmarking.mdx b/docs-site/content/docs/ko/performance/benchmarking.mdx
new file mode 100644
index 0000000..a183171
--- /dev/null
+++ b/docs-site/content/docs/ko/performance/benchmarking.mdx
@@ -0,0 +1,180 @@
+---
+title: 벤치마킹
+description: Go 벤치마크와 Apache Polaris Tools를 사용한 부하 테스트
+---
+
+import { Callout } from 'fumadocs-ui/components/callout';
+
+# 벤치마킹
+
+Bingsan은 Go 내장 벤치마크와 Apache Polaris Tools Gatling 프레임워크를 사용한 포괄적인 벤치마킹을 지원합니다.
+
+## 빠른 시작
+
+### Go 벤치마크
+
+```bash
+# 모든 벤치마크
+go test -bench=. -benchmem ./tests/benchmark/...
+
+# 특정 벤치마크
+go test -bench=BenchmarkTable -benchmem ./tests/benchmark/...
+
+# 풀 벤치마크
+go test -bench=BenchmarkPool -benchmem ./tests/benchmark/...
+```
+
+### Polaris Tools를 사용한 부하 테스트
+
+```bash
+cd benchmarks
+
+# 일회성 설정
+make setup
+
+# OAuth2로 Bingsan 시작
+make start-bingsan
+
+# 테스트 데이터셋 생성
+make create-dataset
+
+# 읽기 벤치마크 실행
+make read-benchmark
+
+# 결과 보기
+make report
+```
+
+## Go 벤치마크
+
+### 사용 가능한 벤치마크
+
+| 벤치마크 | 설명 |
+|----------|------|
+| `BenchmarkBaseline` | 풀링 없는 기준선 |
+| `BenchmarkPool` | 오브젝트 풀링 사용 |
+| `BenchmarkTable` | 테이블 작업 |
+| `BenchmarkNamespace` | 네임스페이스 작업 |
+| `BenchmarkConcurrent` | 동시 부하 |
+| `BenchmarkMemory` | 메모리 할당 |
+
+### 전후 비교
+
+```bash
+# 기준선
+go test -bench=. -benchmem ./tests/benchmark/... | tee baseline.txt
+
+# 변경 후
+go test -bench=. -benchmem ./tests/benchmark/... | tee optimized.txt
+
+# 비교
+benchstat baseline.txt optimized.txt
+```
+
+### 프로파일링과 함께
+
+```bash
+# CPU 프로파일
+go test -bench=BenchmarkTable -cpuprofile=cpu.prof ./tests/benchmark/...
+go tool pprof cpu.prof
+
+# 메모리 프로파일
+go test -bench=BenchmarkTable -memprofile=mem.prof ./tests/benchmark/...
+go tool pprof mem.prof
+```
+
+### 예상 결과
+
+| 벤치마크 | 작업/초 | ns/op | B/op | allocs/op |
+|----------|---------|-------|------|-----------|
+| TableMetadata | 100,000+ | <10,000 | <5,000 | <50 |
+| LargeSchema | 10,000+ | <100,000 | <50,000 | <200 |
+| PoolGet/Put | 20,000,000+ | <50 | 0 | 0 |
+
+## Polaris Tools 부하 테스트
+
+### 전제조건
+
+- **Java 17+** - Gatling에 필요
+- **Docker & Docker Compose** - Bingsan 실행용
+- **Make** - 벤치마크 명령 실행용
+
+### 설정
+
+```bash
+cd benchmarks
+make setup
+```
+
+### 벤치마크 실행
+
+| 명령 | 설명 |
+|------|------|
+| `make read-benchmark` | 읽기 전용 작업 |
+| `make read-update-benchmark` | 혼합 읽기/쓰기 (80/20) |
+| `make create-commits-benchmark` | 커밋 처리량 |
+| `make weighted-benchmark` | 가중치 워크로드 시뮬레이션 |
+| `make full-benchmark` | 모든 벤치마크 순차 실행 |
+
+### 설정
+
+`config/bingsan.conf` 편집:
+
+```text
+http.base-url = "http://localhost:8181"
+
+auth.client-id = "benchmark-client"
+auth.client-secret = "benchmark-secret"
+
+dataset {
+ namespace-width = 2
+ namespace-depth = 3
+ tables-per-namespace = 5
+ views-per-namespace = 3
+}
+
+workload.read-update-tree-dataset {
+ read-write-ratio = 0.8
+ throughput = "50/sec"
+ duration-in-minutes = 3
+}
+```
+
+## Polaris 호환 모드
+
+
+**프로덕션 경고**: 프로덕션에서는 `polaris_enabled: false`를 유지하세요. 이 모드는 벤치마크 호환성만을 위한 것입니다.
+
+
+Polaris Tools는 Polaris 호환 API를 기대합니다. 다음을 통해 활성화:
+
+```yaml
+compat:
+ polaris_enabled: true
+```
+
+## CI/CD 통합
+
+### GitHub Actions 예시
+
+```yaml
+name: Benchmark
+
+on:
+ push:
+ branches: [main]
+
+jobs:
+ benchmark:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version: '1.23'
+ - name: 벤치마크 실행
+ run: |
+ go test -bench=. -benchmem ./tests/benchmark/... | tee results.txt
+ - name: 기준선과 비교
+ run: benchstat baseline.txt results.txt
+```
diff --git a/docs-site/content/docs/ko/performance/index.mdx b/docs-site/content/docs/ko/performance/index.mdx
new file mode 100644
index 0000000..a58dc51
--- /dev/null
+++ b/docs-site/content/docs/ko/performance/index.mdx
@@ -0,0 +1,53 @@
+---
+title: 성능
+description: Bingsan 성능 최적화 및 튜닝
+---
+
+# 성능
+
+Bingsan은 최소한의 메모리 오버헤드로 높은 처리량의 메타데이터 작업에 최적화되어 있습니다.
+
+## 개요
+
+Bingsan은 여러 성능 최적화를 구현합니다:
+
+- **오브젝트 풀링** - GC 압력 감소를 위한 메모리 버퍼 재사용
+- **분산 잠금** - 설정 가능한 타임아웃이 있는 PostgreSQL 기반 잠금
+- **최적화된 직렬화** - 빠른 JSON 인코딩을 위해 goccy/go-json 사용
+
+## 성능 목표
+
+| 메트릭 | 목표 | 일반 |
+|--------|------|------|
+| 테이블 메타데이터 직렬화 | <50ms | ~10us |
+| 대형 스키마 (100+ 컬럼) | <200ms | ~90us |
+| 메모리 할당 감소 | >=30% | 19-26% |
+| GC 일시정지 (p99) | <10ms | <5ms |
+| 풀 히트율 | >=80% | ~100% |
+
+## 설정
+
+풀 및 잠금 설정은 `config.yaml`에서 구성합니다:
+
+```yaml
+catalog:
+ lock_timeout: 30s
+ lock_retry_interval: 100ms
+ max_lock_retries: 100
+```
+
+또는 환경 변수를 통해:
+
+```bash
+ICEBERG_CATALOG_LOCK_TIMEOUT=30s
+ICEBERG_CATALOG_LOCK_RETRY_INTERVAL=100ms
+ICEBERG_CATALOG_MAX_LOCK_RETRIES=100
+```
+
+## 섹션
+
+- [오브젝트 풀링](/docs/performance/pooling) - 할당 감소를 위한 메모리 버퍼 재사용
+- [분산 잠금](/docs/performance/locking) - 재시도 로직이 있는 PostgreSQL 기반 잠금
+- [벤치마킹](/docs/performance/benchmarking) - Apache Polaris Tools를 사용한 부하 테스트
+- [메트릭](/docs/performance/metrics) - 모니터링을 위한 Prometheus 메트릭
+- [튜닝](/docs/performance/tuning) - 성능 튜닝 가이드라인
diff --git a/docs-site/content/docs/ko/performance/locking.mdx b/docs-site/content/docs/ko/performance/locking.mdx
new file mode 100644
index 0000000..7a0b9b2
--- /dev/null
+++ b/docs-site/content/docs/ko/performance/locking.mdx
@@ -0,0 +1,160 @@
+---
+title: 분산 잠금
+description: 재시도 로직이 있는 PostgreSQL 기반 잠금
+---
+
+# 분산 잠금
+
+Bingsan은 설정 가능한 타임아웃과 재시도 로직이 있는 PostgreSQL 행 레벨 잠금을 사용하여 여러 인스턴스에서 동시 수정을 안전하게 처리합니다.
+
+## 개요
+
+여러 Bingsan 인스턴스가 동시에 같은 리소스를 수정할 때, 잠금은 다음을 보장합니다:
+
+- **일관성**: 한 번에 하나의 작업만 리소스 수정
+- **격리**: 작업이 부분적 상태를 보지 않음
+- **자동 복구**: 실패한 잠금은 백오프와 함께 재시도
+
+## 설정
+
+`config.yaml`을 통해 잠금 구성:
+
+```yaml
+catalog:
+ lock_timeout: 30s
+ lock_retry_interval: 100ms
+ max_lock_retries: 100
+```
+
+### 환경 변수
+
+```bash
+ICEBERG_CATALOG_LOCK_TIMEOUT=30s
+ICEBERG_CATALOG_LOCK_RETRY_INTERVAL=100ms
+ICEBERG_CATALOG_MAX_LOCK_RETRIES=100
+```
+
+## 동작 방식
+
+### 잠금 획득 흐름
+
+1. 트랜잭션 시작
+2. SET LOCAL lock_timeout = '30s'
+3. 작업 실행 (SELECT ... FOR UPDATE)
+4. 성공 시: 트랜잭션 커밋
+5. 잠금 타임아웃 (55P03) 시: 롤백, retry_interval 대기, 재시도
+6. max_retries 초과 시: ErrLockTimeout 반환
+
+### PostgreSQL 잠금 타임아웃
+
+각 트랜잭션은 로컬로 `lock_timeout`을 설정합니다:
+
+```sql
+BEGIN;
+SET LOCAL lock_timeout = '30000ms';
+SELECT * FROM tables WHERE id = $1 FOR UPDATE;
+-- ... 업데이트 수행 ...
+COMMIT;
+```
+
+## 설정 옵션
+
+| 옵션 | 기본값 | 설명 |
+|------|--------|------|
+| `lock_timeout` | 30s | 단일 잠금 시도 최대 대기 시간 |
+| `lock_retry_interval` | 100ms | 재시도 사이 대기 시간 |
+| `max_lock_retries` | 100 | 실패 전 최대 재시도 횟수 |
+
+### 총 대기 시간
+
+```
+max_wait = lock_timeout + (max_lock_retries × lock_retry_interval)
+ = 30s + (100 × 100ms)
+ = 40s
+```
+
+## 튜닝 가이드라인
+
+### 높은 경합 워크로드
+
+```yaml
+catalog:
+ lock_timeout: 5s
+ lock_retry_interval: 50ms
+ max_lock_retries: 200
+```
+
+### 낮은 경합 워크로드
+
+```yaml
+catalog:
+ lock_timeout: 60s
+ lock_retry_interval: 500ms
+ max_lock_retries: 10
+```
+
+### 배치 처리
+
+```yaml
+catalog:
+ lock_timeout: 120s
+ lock_retry_interval: 1s
+ max_lock_retries: 60
+```
+
+## 에러 처리
+
+### ErrLockTimeout
+
+모든 재시도가 소진되면 반환됩니다. 클라이언트가 작업을 재시도해야 합니다.
+
+### 직렬화 실패
+
+PostgreSQL 직렬화 에러 (40001)도 감지되어 처리됩니다.
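+
+위에서 언급한 두 에러 코드(55P03, 40001)를 재시도 대상으로 분류하는 로직을 스케치하면 아래와 같습니다. 함수 이름은 예시이며 실제 재시도 로직의 구조는 다를 수 있습니다.
+
+```go
+// 재시도 가능한 PostgreSQL 에러를 판별하는 스케치입니다.
+package db
+
+import (
+	"errors"
+
+	"github.com/jackc/pgx/v5/pgconn"
+)
+
+// isRetryable은 잠금 타임아웃과 직렬화 실패를 재시도 가능한 에러로 분류합니다.
+func isRetryable(err error) bool {
+	var pgErr *pgconn.PgError
+	if !errors.As(err, &pgErr) {
+		return false
+	}
+	switch pgErr.Code {
+	case "55P03": // lock_not_available: SET LOCAL lock_timeout 초과
+		return true
+	case "40001": // serialization_failure
+		return true
+	}
+	return false
+}
+```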
+
+## 모니터링
+
+### 잠금 대기 메트릭
+
+PostgreSQL을 통해 잠금 경합 모니터링:
+
+```sql
+-- 활성 잠금
+SELECT * FROM pg_locks WHERE NOT granted;
+
+-- 잠금 대기 통계
+SELECT * FROM pg_stat_activity
+WHERE wait_event_type = 'Lock';
+```
+
+## 모범 사례
+
+### 트랜잭션 짧게 유지
+
+```go
+// 좋음: 잠금 내 최소 작업
+err := db.WithLock(ctx, cfg, func(tx pgx.Tx) error {
+ return tx.Exec(ctx, "UPDATE tables SET ...")
+})
+
+// 나쁨: 잠금 내 외부 호출
+err := db.WithLock(ctx, cfg, func(tx pgx.Tx) error {
+ callExternalService() // 느릴 수 있음!
+ return tx.Exec(ctx, "UPDATE tables SET ...")
+})
+```
+
+## 문제 해결
+
+### 빈번한 잠금 타임아웃
+
+**원인**:
+- 같은 테이블에 대한 높은 쓰기 경합
+- 잠금을 유지하는 장기 실행 트랜잭션
+- 데이터베이스 성능 문제
+
+**해결책**:
+1. `max_lock_retries` 늘리기
+2. `lock_timeout` 줄이기 (더 빨리 실패, 더 빨리 재시도)
+3. 잠금을 유지하는 느린 쿼리 확인
+4. 다른 테이블 간 워크로드 분산
diff --git a/docs-site/content/docs/ko/performance/meta.json b/docs-site/content/docs/ko/performance/meta.json
new file mode 100644
index 0000000..bed293b
--- /dev/null
+++ b/docs-site/content/docs/ko/performance/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "성능",
+ "pages": ["index", "pooling", "locking", "benchmarking", "metrics", "tuning"]
+}
diff --git a/docs-site/content/docs/ko/performance/metrics.mdx b/docs-site/content/docs/ko/performance/metrics.mdx
new file mode 100644
index 0000000..ac02fc2
--- /dev/null
+++ b/docs-site/content/docs/ko/performance/metrics.mdx
@@ -0,0 +1,178 @@
+---
+title: 메트릭
+description: Bingsan 성능 모니터링을 위한 Prometheus 메트릭
+---
+
+# 성능 메트릭
+
+Bingsan은 `/metrics` 엔드포인트에서 Prometheus를 통해 성능 메트릭을 노출합니다.
+
+## 풀 메트릭
+
+오브젝트 풀 활용 메트릭은 메모리 효율성 모니터링에 도움이 됩니다.
+
+### bingsan_pool_gets_total
+
+**유형**: Counter
+**레이블**: `pool`
+
+풀의 총 `Get()` 작업 수.
+
+```text
+# 풀별 가져오기 비율
+rate(bingsan_pool_gets_total[5m])
+
+# 풀 유형별 총 가져오기
+sum by (pool) (bingsan_pool_gets_total)
+```
+
+### bingsan_pool_returns_total
+
+**유형**: Counter
+**레이블**: `pool`
+
+항목을 풀로 반환하는 성공적인 `Put()` 작업의 총 수.
+
+```text
+# 풀 효율성 (반환/가져오기)
+rate(bingsan_pool_returns_total{pool="buffer"}[5m])
+/ rate(bingsan_pool_gets_total{pool="buffer"}[5m])
+```
+
+### bingsan_pool_discards_total
+
+**유형**: Counter
+**레이블**: `pool`
+
+폐기된 항목(과대 또는 유효하지 않음)의 총 수.
+
+```text
+# 폐기 비율
+rate(bingsan_pool_discards_total{pool="buffer"}[5m])
+/ rate(bingsan_pool_gets_total{pool="buffer"}[5m]) * 100
+```
+
+### bingsan_pool_misses_total
+
+**유형**: Counter
+**레이블**: `pool`
+
+새 할당이 필요한 풀 미스의 총 수.
+
+```text
+# 히트율 (추정)
+1 - (rate(bingsan_pool_misses_total[5m]) / rate(bingsan_pool_gets_total[5m]))
+```
+
+## 풀 레이블
+
+| 레이블 | 값 | 설명 |
+|--------|-----|------|
+| `pool` | `buffer`, `bytes` | 풀 유형 식별자 |
+
+## 알림 규칙
+
+### 낮은 풀 히트율
+
+```yaml
+- alert: LowPoolHitRate
+ expr: |
+ (rate(bingsan_pool_returns_total{pool="buffer"}[5m])
+ / rate(bingsan_pool_gets_total{pool="buffer"}[5m])) < 0.8
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "풀 히트율 80% 미만"
+```
+
+### 높은 폐기율
+
+```yaml
+- alert: HighPoolDiscardRate
+ expr: rate(bingsan_pool_discards_total{pool="buffer"}[5m]) > 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "높은 풀 폐기율"
+```
+
+## 레코딩 규칙
+
+비싼 쿼리 사전 계산:
+
+```yaml
+groups:
+ - name: bingsan_pool_recording
+ interval: 30s
+ rules:
+ - record: bingsan:pool_hit_rate:5m
+ expr: |
+ rate(bingsan_pool_returns_total[5m])
+ / rate(bingsan_pool_gets_total[5m])
+
+ - record: bingsan:pool_discard_rate:5m
+ expr: rate(bingsan_pool_discards_total[5m])
+```
+
+## 메트릭 해석
+
+### 건강한 풀
+
+```
+gets_total: 1,000,000
+returns_total: 1,000,000
+discards_total: 50
+misses_total: 100
+
+활용률: 100% (반환 = 가져오기)
+폐기율: 0.005%
+미스율: 0.01%
+```
+
+### 누수가 있는 풀
+
+```
+gets_total: 1,000,000
+returns_total: 800,000 ← 200,000 누락!
+discards_total: 100
+misses_total: 200,100 ← 높은 미스
+
+활용률: 80%
+미스율: 20%
+```
+
+**조치**: 누락된 `defer pool.Put()` 호출 확인.
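+
+누수는 대개 에러 경로에서 발생합니다. 아래는 문제 패턴과 `defer`로 반환을 보장하는 패턴을 비교한 스케치입니다 (`encode`는 설명을 위해 가정한 직렬화 헬퍼):
+
+```go
+// 나쁨: 에러 경로에서 버퍼가 풀로 돌아가지 않음
+buf := pool.GetBuffer()
+if err := encode(buf, v); err != nil {
+    return err // PutBuffer 호출 누락!
+}
+pool.PutBuffer(buf)
+
+// 좋음: defer로 모든 경로에서 반환 보장
+buf := pool.GetBuffer()
+defer pool.PutBuffer(buf)
+if err := encode(buf, v); err != nil {
+    return err
+}
+```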
+
+### 큰 응답이 있는 풀
+
+```
+gets_total: 1,000,000
+returns_total: 700,000
+discards_total: 300,000 ← 높은 폐기!
+misses_total: 50
+
+활용률: 70%
+폐기율: 30%
+```
+
+**조치**: 스키마가 크면 `MaxBufferSize` 늘리기 고려.
+
+## 헬스 체크 엔드포인트
+
+### /health
+
+기본 헬스 체크 (정상이면 200 반환):
+
+```bash
+curl http://localhost:8181/health
+```
+
+### /metrics
+
+Prometheus 메트릭 엔드포인트:
+
+```bash
+curl http://localhost:8181/metrics | grep bingsan_pool
+```
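+
+출력 예시 (값은 예시입니다):
+
+```text
+# HELP bingsan_pool_gets_total Total number of pool Get() calls
+# TYPE bingsan_pool_gets_total counter
+bingsan_pool_gets_total{pool="buffer"} 15234
+bingsan_pool_gets_total{pool="bytes"} 8421
+
+# HELP bingsan_pool_returns_total Total number of pool Put() calls
+# TYPE bingsan_pool_returns_total counter
+bingsan_pool_returns_total{pool="buffer"} 15234
+bingsan_pool_returns_total{pool="bytes"} 8421
+```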
diff --git a/docs-site/content/docs/ko/performance/pooling.mdx b/docs-site/content/docs/ko/performance/pooling.mdx
new file mode 100644
index 0000000..a7a1b6d
--- /dev/null
+++ b/docs-site/content/docs/ko/performance/pooling.mdx
@@ -0,0 +1,115 @@
+---
+title: 오브젝트 풀링
+description: 할당 감소를 위한 메모리 버퍼 재사용
+---
+
+# 오브젝트 풀링
+
+Bingsan은 Go 표준 라이브러리의 `sync.Pool`을 사용하여 핫 패스에서 메모리 할당 압력을 줄입니다.
+
+## 개요
+
+두 가지 유형의 풀이 구현되어 있습니다:
+
+| 풀 | 용도 | 기본 크기 | 최대 크기 |
+|----|------|----------|----------|
+| **BufferPool** | JSON 직렬화 버퍼 | 4 KB | 64 KB |
+| **BytePool** | OAuth 토큰 생성 | 32 bytes | 32 bytes |
+
+## 동작 방식
+
+### BufferPool
+
+`BufferPool`은 JSON 직렬화를 위한 재사용 가능한 `bytes.Buffer` 인스턴스를 제공합니다:
+
+```
+요청 1 ──► 버퍼 가져오기 ──► JSON 직렬화 ──► 버퍼 반환 ──► 풀
+ │ ▲
+ └──────────────────────────────────┘
+ 재사용됨
+```
+
+**주요 특성:**
+- 초기 용량: 4 KB (일반적인 JSON 메타데이터 크기)
+- 최대 크기: 64 KB (과대 버퍼는 폐기)
+- `sync.Pool`을 통한 스레드 안전
+- 가져올 때 자동 리셋
+
+### BytePool
+
+고정 크기: OAuth 액세스 토큰 생성용 32 bytes.
+
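+아래는 풀에서 빌린 32바이트 슬라이스로 토큰을 만드는 흐름을 보여주는 최소 스케치입니다. `GetTokenBytes`/`PutTokenBytes`는 설명을 위해 가정한 헬퍼 이름입니다:
+
+```go
+import (
+    "crypto/rand"
+    "encoding/hex"
+)
+
+// newAccessToken은 풀에서 빌린 32바이트 슬라이스로 64자 hex 토큰을 생성합니다.
+// GetTokenBytes/PutTokenBytes는 설명용으로 가정한 이름입니다.
+func newAccessToken() (string, error) {
+    b := pool.GetTokenBytes()
+    defer pool.PutTokenBytes(b)
+
+    if _, err := rand.Read(b); err != nil {
+        return "", err
+    }
+    // 슬라이스는 매번 덮어쓰므로 반환 전에 내용을 지울 필요가 없습니다.
+    return hex.EncodeToString(b), nil
+}
+```
+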
+## 사용 패턴
+
+### API 핸들러에서
+
+```go
+func (h *Handler) GetTable(ctx *fiber.Ctx) error {
+ buf := pool.GetBuffer()
+ defer pool.PutBuffer(buf) // 항상 반환!
+
+ encoder := json.NewEncoder(buf)
+ if err := encoder.Encode(table); err != nil {
+ return err
+ }
+
+ return ctx.Send(buf.Bytes())
+}
+```
+
+## 설정
+
+| 상수 | 값 | 설명 |
+|------|-----|------|
+| `DefaultBufferSize` | 4096 | 초기 버퍼 용량 (bytes) |
+| `MaxBufferSize` | 65536 | 폐기 전 최대 버퍼 크기 |
+| `TokenSize` | 32 | 토큰 byte slice 고정 크기 |
+
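+이 값들은 컴파일 타임 상수입니다. 스키마가 큰 워크로드에서는 `internal/pool/buffer.go`의 상수를 수정해 빌드할 수 있습니다 (아래 값은 예시):
+
+```go
+// internal/pool/buffer.go — 대형 스키마용 예시 값
+const (
+    DefaultBufferSize = 8192   // 8KB로 증가 (더 큰 초기 버퍼)
+    MaxBufferSize     = 131072 // 128KB로 증가 (폐기 전 최대 크기)
+)
+```
+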
+## 모범 사례
+
+### 항상 `defer` 사용
+
+```go
+buf := pool.GetBuffer()
+defer pool.PutBuffer(buf) // 반환 보장
+```
+
+### 참조 유지하지 않기
+
+```go
+// 잘못됨: 참조가 이스케이프
+data := buf.Bytes()
+pool.PutBuffer(buf)
+return data // data가 이제 유효하지 않음!
+
+// 올바름: 필요시 복사
+data := make([]byte, buf.Len())
+copy(data, buf.Bytes())
+pool.PutBuffer(buf)
+return data
+```
+
+## 메트릭
+
+풀 성능은 Prometheus를 통해 노출됩니다:
+
+| 메트릭 | 유형 | 설명 |
+|--------|------|------|
+| `bingsan_pool_gets_total` | Counter | 총 Get() 작업 |
+| `bingsan_pool_returns_total` | Counter | 총 Put() 작업 |
+| `bingsan_pool_discards_total` | Counter | 폐기된 과대 항목 |
+| `bingsan_pool_misses_total` | Counter | 새 할당 (풀 비어있음) |
+
+## 벤치마크
+
+```bash
+go test -bench=BenchmarkPool -benchmem ./tests/benchmark/...
+```
+
+예상 결과:
+
+| 벤치마크 | 시간 | 할당 |
+|----------|------|------|
+| BufferPool.Get/Put | ~50ns | 0 |
+| BufferPool.Concurrent | ~100ns | 0 |
+| BytePool.Get/Put | ~30ns | 0 |
diff --git a/docs-site/content/docs/ko/performance/tuning.mdx b/docs-site/content/docs/ko/performance/tuning.mdx
new file mode 100644
index 0000000..274ce74
--- /dev/null
+++ b/docs-site/content/docs/ko/performance/tuning.mdx
@@ -0,0 +1,214 @@
+---
+title: 튜닝
+description: Bingsan 성능 튜닝 가이드라인
+---
+
+# 성능 튜닝
+
+이 가이드는 워크로드 특성에 따른 최적의 성능을 위해 Bingsan을 튜닝하는 방법을 다룹니다.
+
+## 빠른 참조
+
+| 워크로드 | 잠금 타임아웃 | 재시도 간격 | 최대 재시도 | 버퍼 크기 |
+|----------|--------------|------------|-------------|----------|
+| 저지연 | 5s | 50ms | 20 | 4KB |
+| 고처리량 | 30s | 100ms | 100 | 4KB |
+| 대형 스키마 | 30s | 100ms | 100 | 8-16KB |
+| 배치 처리 | 120s | 1s | 60 | 4KB |
+
+## 워크로드 프로파일
+
+### 저지연
+
+처리량보다 빠른 응답 우선:
+
+```yaml
+catalog:
+ lock_timeout: 5s
+ lock_retry_interval: 50ms
+ max_lock_retries: 20
+
+server:
+ read_timeout: 10s
+ write_timeout: 10s
+
+database:
+ max_open_conns: 50
+ max_idle_conns: 25
+```
+
+### 고처리량
+
+초당 요청 최대화:
+
+```yaml
+catalog:
+ lock_timeout: 30s
+ lock_retry_interval: 100ms
+ max_lock_retries: 100
+
+server:
+ read_timeout: 60s
+ write_timeout: 60s
+ idle_timeout: 300s
+
+database:
+ max_open_conns: 100
+ max_idle_conns: 50
+ conn_max_lifetime: 30m
+```
+
+### 배치 처리
+
+대량 작업용:
+
+```yaml
+catalog:
+ lock_timeout: 120s
+ lock_retry_interval: 1s
+ max_lock_retries: 60
+
+server:
+ read_timeout: 300s
+ write_timeout: 300s
+
+database:
+ max_open_conns: 25
+ conn_max_lifetime: 60m
+```
+
+## 증상별 튜닝
+
+### 높은 지연시간
+
+**진단:**
+```text
+# 잠금 대기 시간 확인
+rate(iceberg_db_wait_duration_seconds_total[5m])
+
+# 풀 폐기율 확인
+rate(bingsan_pool_discards_total[5m])
+
+# 커넥션 포화 확인
+iceberg_db_connections_in_use / iceberg_db_connections_max
+```
+
+**해결책:**
+1. 잠금 경합 - `lock_timeout` 줄이기, `max_lock_retries` 늘리기
+2. 풀 폐기 - 대형 스키마용 `MaxBufferSize` 늘리기
+3. 커넥션 풀 - `max_open_conns` 늘리기
+
+### 높은 메모리 사용량
+
+**진단:**
+```bash
+curl http://localhost:8181/debug/pprof/heap > heap.prof
+go tool pprof heap.prof
+```
+
+**해결책:**
+1. 버퍼 누수 - 모든 코드 경로가 버퍼를 반환하는지 확인
+2. 큰 버퍼 - `MaxBufferSize` 줄이기
+3. 커넥션 비대 - `max_open_conns` 줄이기
+
+### 잠금 타임아웃 에러
+
+**진단:**
+```sql
+SELECT * FROM pg_locks WHERE NOT granted;
+SELECT * FROM pg_stat_activity WHERE wait_event_type = 'Lock';
+```
+
+**해결책:**
+1. 높은 경합 - `max_lock_retries` 늘리기
+2. 느린 트랜잭션 - 트랜잭션 짧게 유지
+3. 데드락 - Bingsan이 자동으로 처리
+
+## 데이터베이스 튜닝
+
+### PostgreSQL 설정
+
+```sql
+ALTER SYSTEM SET max_connections = 500;
+ALTER SYSTEM SET lock_timeout = '30s';
+ALTER SYSTEM SET statement_timeout = '60s';
+ALTER SYSTEM SET effective_cache_size = '12GB';
+ALTER SYSTEM SET shared_buffers = '4GB';
+```
+
+### PgBouncer를 통한 커넥션 풀링
+
+```ini
+[databases]
+iceberg_catalog = host=postgres port=5432 dbname=iceberg_catalog
+
+[pgbouncer]
+pool_mode = transaction
+max_client_conn = 1000
+default_pool_size = 50
+```
+
+## 리소스 크기 산정
+
+### 메모리
+
+```
+memory_per_instance = base + (concurrent_requests × request_memory)
+ ≈ 50MB + (500 × 100KB)
+ ≈ 100MB 일반
+ ≈ 200MB 피크
+```
+
+### CPU
+
+```
+cpu_per_instance ≈ 0.2 코어 유휴
+ ≈ 1 코어 부하 시
+```
+
+### 인스턴스
+
+```
+instances = (peak_rps / rps_per_instance) × 1.5
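+          = (10000 / 5000) × 1.5
+          = 최소 3 인스턴스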
+```
+
+## 프로파일링
+
+### CPU 프로파일
+
+```bash
+curl http://localhost:8181/debug/pprof/profile?seconds=30 > cpu.prof
+go tool pprof -http=:8080 cpu.prof
+```
+
+### 메모리 프로파일
+
+```bash
+curl http://localhost:8181/debug/pprof/heap > heap.prof
+go tool pprof -http=:8080 heap.prof
+```
+
+### 추적
+
+```bash
+curl http://localhost:8181/debug/pprof/trace?seconds=5 > trace.out
+go tool trace trace.out
+```
+
+## 체크리스트
+
+### 프로덕션 전
+
+- [ ] 워크로드에 적합한 `lock_timeout` 설정
+- [ ] 예상 부하에 따른 `max_open_conns` 구성
+- [ ] Prometheus 메트릭 수집 활성화
+- [ ] 풀 상태 알림 설정
+- [ ] 현실적인 데이터로 부하 테스트 실행
+
+### 프로덕션 모니터링
+
+- [ ] 풀 히트율 > 80%
+- [ ] 풀 폐기율 < 1%
+- [ ] 잠금 타임아웃율 < 1%
+- [ ] 커넥션 활용률 < 90%
+- [ ] GC 일시정지 p99 < 10ms
diff --git a/docs-site/content/docs/meta.json b/docs-site/content/docs/meta.json
new file mode 100644
index 0000000..6c876b3
--- /dev/null
+++ b/docs-site/content/docs/meta.json
@@ -0,0 +1,13 @@
+{
+ "root": true,
+ "pages": [
+ "getting-started",
+ "integrations",
+ "api",
+ "configuration",
+ "architecture",
+ "performance",
+ "deployment",
+ "contributing"
+ ]
+}
diff --git a/docs-site/content/docs/performance/_index.md b/docs-site/content/docs/performance/_index.md
deleted file mode 100644
index 53cfe2c..0000000
--- a/docs-site/content/docs/performance/_index.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: "Performance"
-weight: 5
-bookCollapseSection: true
----
-
-# Performance
-
-Bingsan is optimized for high-throughput metadata operations with minimal memory overhead. This section covers the performance features, tuning options, and benchmarking capabilities.
-
-## Overview
-
-Bingsan implements several performance optimizations:
-
-- **Object Pooling** - Reuses memory buffers to reduce GC pressure
-- **Distributed Locking** - PostgreSQL-based locks with configurable timeouts
-- **Optimized Serialization** - Uses goccy/go-json for fast JSON encoding
-
-```
-┌─────────────────────────────────────────────────────────────┐
-│ Request Flow │
-└─────────────────────────┬───────────────────────────────────┘
- │
- ▼
-┌─────────────────────────────────────────────────────────────┐
-│ Object Pool │
-│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
-│ │ BufferPool │ │ BytePool │ │ Metrics │ │
-│ │ (JSON/IO) │ │ (Tokens) │ │ (Prometheus)│ │
-│ └─────────────┘ └─────────────┘ └─────────────┘ │
-└─────────────────────────┬───────────────────────────────────┘
- │
- ▼
-┌─────────────────────────────────────────────────────────────┐
-│ PostgreSQL Locking │
-│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
-│ │ lock_timeout│ │ Retries │ │ Advisory │ │
-│ │ (per-tx) │ │ (backoff) │ │ Locks │ │
-│ └─────────────┘ └─────────────┘ └─────────────┘ │
-└─────────────────────────────────────────────────────────────┘
-```
-
-## Performance Targets
-
-| Metric | Target | Typical |
-|--------|--------|---------|
-| Table metadata serialization | <50ms | ~10µs |
-| Large schema (100+ cols) | <200ms | ~90µs |
-| Memory allocation reduction | ≥30% | 19-26% |
-| GC pause (p99) | <10ms | <5ms |
-| Pool hit rate | ≥80% | ~100% |
-
-## Configuration
-
-Pool and locking settings are configured in `config.yaml`:
-
-```yaml
-catalog:
- lock_timeout: 30s
- lock_retry_interval: 100ms
- max_lock_retries: 100
-```
-
-Or via environment variables:
-
-```bash
-ICEBERG_CATALOG_LOCK_TIMEOUT=30s
-ICEBERG_CATALOG_LOCK_RETRY_INTERVAL=100ms
-ICEBERG_CATALOG_MAX_LOCK_RETRIES=100
-```
-
-## Sections
-
-- [Object Pooling]({{< relref "/docs/performance/pooling" >}}) - Memory buffer reuse for reduced allocations
-- [Distributed Locking]({{< relref "/docs/performance/locking" >}}) - PostgreSQL-based locking with retry logic
-- [Benchmarking]({{< relref "/docs/performance/benchmarking" >}}) - Load testing with Apache Polaris Tools
-- [Metrics]({{< relref "/docs/performance/metrics" >}}) - Prometheus metrics for monitoring
-- [Tuning]({{< relref "/docs/performance/tuning" >}}) - Performance tuning guidelines
diff --git a/docs-site/content/docs/performance/benchmarking.md b/docs-site/content/docs/performance/benchmarking.md
deleted file mode 100644
index b4dcc42..0000000
--- a/docs-site/content/docs/performance/benchmarking.md
+++ /dev/null
@@ -1,403 +0,0 @@
----
-title: "Benchmarking"
-weight: 3
----
-
-# Benchmarking
-
-Bingsan includes comprehensive benchmarking support using both Go's built-in benchmarks and the Apache Polaris Tools Gatling framework for load testing.
-
-## Quick Start
-
-### Go Benchmarks
-
-Run micro-benchmarks:
-
-```bash
-# All benchmarks
-go test -bench=. -benchmem ./tests/benchmark/...
-
-# Specific benchmark
-go test -bench=BenchmarkTable -benchmem ./tests/benchmark/...
-
-# Pool benchmarks
-go test -bench=BenchmarkPool -benchmem ./tests/benchmark/...
-```
-
-### Load Testing with Polaris Tools
-
-```bash
-cd benchmarks
-
-# One-time setup
-make setup
-
-# Start Bingsan with OAuth2
-make start-bingsan
-
-# Create test dataset
-make create-dataset
-
-# Run read benchmark
-make read-benchmark
-
-# View results
-make report
-```
-
-## Go Benchmarks
-
-### Available Benchmarks
-
-| Benchmark | Description |
-|-----------|-------------|
-| `BenchmarkBaseline` | Baseline without pooling |
-| `BenchmarkPool` | With object pooling |
-| `BenchmarkTable` | Table operations |
-| `BenchmarkNamespace` | Namespace operations |
-| `BenchmarkConcurrent` | Concurrent load |
-| `BenchmarkMemory` | Memory allocation |
-
-### Running Benchmarks
-
-**Basic run:**
-```bash
-go test -bench=. -benchmem ./tests/benchmark/...
-```
-
-**Compare before/after:**
-```bash
-# Baseline
-go test -bench=. -benchmem ./tests/benchmark/... | tee baseline.txt
-
-# After changes
-go test -bench=. -benchmem ./tests/benchmark/... | tee optimized.txt
-
-# Compare
-benchstat baseline.txt optimized.txt
-```
-
-**With CPU profile:**
-```bash
-go test -bench=BenchmarkTable -cpuprofile=cpu.prof ./tests/benchmark/...
-go tool pprof cpu.prof
-```
-
-**With memory profile:**
-```bash
-go test -bench=BenchmarkTable -memprofile=mem.prof ./tests/benchmark/...
-go tool pprof mem.prof
-```
-
-### Expected Results
-
-| Benchmark | Ops/sec | ns/op | B/op | allocs/op |
-|-----------|---------|-------|------|-----------|
-| TableMetadata | 100,000+ | <10,000 | <5,000 | <50 |
-| LargeSchema | 10,000+ | <100,000 | <50,000 | <200 |
-| PoolGet/Put | 20,000,000+ | <50 | 0 | 0 |
-| Concurrent-100 | 1,000,000+ | <3,000 | <1,000 | <10 |
-
----
-
-## Polaris Tools Load Testing
-
-Bingsan supports load testing with [Apache Polaris Tools](https://github.com/apache/polaris-tools), a Gatling-based benchmark framework.
-
-### Prerequisites
-
-- **Java 17+** - Required for Gatling
-- **Docker & Docker Compose** - For running Bingsan
-- **Make** - For running benchmark commands
-
-```bash
-# Install Java (macOS)
-brew install openjdk@17
-
-# Verify
-java -version # Should show 17 or higher
-```
-
-### Setup
-
-```bash
-cd benchmarks
-
-# Clone polaris-tools and build
-make setup
-
-# Verify setup
-ls polaris-tools/benchmarks/ # Should exist
-```
-
-### Running Benchmarks
-
-**Start the benchmark stack:**
-```bash
-make start-bingsan
-```
-
-This starts Bingsan with:
-- OAuth2 authentication enabled
-- Polaris compatibility mode
-- PostgreSQL database
-- Exposed on port 8181
-
-**Create test dataset:**
-```bash
-make create-dataset
-```
-
-Creates namespaces, tables, and views for testing.
-
-**Run benchmarks:**
-
-| Command | Description |
-|---------|-------------|
-| `make read-benchmark` | Read-only operations |
-| `make read-update-benchmark` | Mixed read/write (80/20) |
-| `make create-commits-benchmark` | Commit throughput |
-| `make weighted-benchmark` | Weighted workload simulation |
-| `make full-benchmark` | All benchmarks sequentially |
-
-**View results:**
-```bash
-make report
-```
-
-Opens the HTML report in your browser.
-
-### Configuration
-
-Edit `config/bingsan.conf`:
-
-```hocon
-# Connection
-http.base-url = "http://localhost:8181"
-
-# Authentication
-auth.client-id = "benchmark-client"
-auth.client-secret = "benchmark-secret"
-
-# Dataset size
-dataset {
- namespace-width = 2 # Namespaces per level
- namespace-depth = 3 # Namespace tree depth
- tables-per-namespace = 5
- views-per-namespace = 3
-}
-
-# Workload settings
-workload.read-update-tree-dataset {
- read-write-ratio = 0.8 # 80% read, 20% write
- throughput = "50/sec" # Target throughput
- duration-in-minutes = 3 # Test duration
-}
-```
-
-### Benchmark Simulations
-
-#### CreateTreeDataset
-Creates test data structure:
-- Hierarchical namespace tree
-- Tables and views in each namespace
-- Must run before other benchmarks
-
-#### ReadTreeDataset
-Read-only operations:
-- List namespaces
-- Get table/view metadata
-- Check existence
-
-#### ReadUpdateTreeDataset
-Mixed workload:
-- Configurable read/write ratio
-- Property updates
-- New resource creation
-
-#### CreateCommits
-Commit throughput:
-- Table commits at configurable rate
-- Measures commit latency
-- Tests lock contention
-
-#### WeightedWorkloadOnTreeDataset
-Realistic simulation:
-- Multiple reader/writer groups
-- Configurable timing variance
-- Tests conflict handling
-
-### Results
-
-Results are stored in:
-```
-polaris-tools/benchmarks/build/reports/gatling//
-```
-
-Each report includes:
-- **Response time distribution** (p50, p95, p99)
-- **Requests per second** over time
-- **Active users** (concurrent load)
-- **Error breakdown**
-
----
-
-## Polaris Compatibility Mode
-
-Polaris Tools expects a Polaris-compatible API. Bingsan enables this via:
-
-```yaml
-compat:
- polaris_enabled: true
-```
-
-This adds:
-- **Path rewriting**: `/api/catalog/v1/{catalog}/...` → `/v1/...`
-- **Polaris OAuth endpoint**: `/api/catalog/v1/oauth/tokens`
-- **Mock Management API**: `/api/management/v1/catalogs`
-
-{{< hint warning >}}
-**Production Warning**: Keep `polaris_enabled: false` in production. This mode is only for benchmark compatibility.
-{{< /hint >}}
-
----
-
-## Comparing Results
-
-### Manual Comparison
-
-1. Run baseline benchmark, save report path
-2. Make changes to Bingsan
-3. Run benchmark again
-4. Compare HTML reports side by side
-
-### Using benchstat
-
-For Go benchmarks:
-
-```bash
-# Install benchstat
-go install golang.org/x/perf/cmd/benchstat@latest
-
-# Run comparison
-benchstat baseline.txt optimized.txt
-```
-
-Output:
-```
-name old time/op new time/op delta
-TableMeta-8 15.2µs ± 2% 10.1µs ± 1% -33.55% (p=0.000 n=10+10)
-LargeSchema-8 120µs ± 3% 90µs ± 2% -25.00% (p=0.000 n=10+10)
-
-name old alloc/op new alloc/op delta
-TableMeta-8 4.86kB ± 0% 3.84kB ± 0% -20.99% (p=0.000 n=10+10)
-LargeSchema-8 48.2kB ± 0% 38.5kB ± 0% -20.12% (p=0.000 n=10+10)
-```
-
----
-
-## Troubleshooting
-
-### Java Version Error
-
-```bash
-# Check version
-java -version
-
-# Set JAVA_HOME (macOS)
-export JAVA_HOME=$(/usr/libexec/java_home -v 17)
-```
-
-### Connection Refused
-
-```bash
-# Check if Bingsan is running
-docker ps | grep bingsan-bench
-
-# View logs
-make logs
-
-# Test connectivity
-make quick-test
-```
-
-### OAuth2 Errors
-
-```bash
-# Verify OAuth2 is enabled
-curl http://localhost:8181/v1/config
-
-# Test token endpoint
-curl -X POST http://localhost:8181/v1/oauth/tokens \
- -d "grant_type=client_credentials&client_id=benchmark-client&client_secret=benchmark-secret"
-```
-
-### Out of Memory
-
-Edit `polaris-tools/benchmarks/build.gradle.kts`:
-
-```kotlin
-gatling {
- jvmArgs = listOf("-Xmx2g", "-Xms1g")
-}
-```
-
-### Cleanup
-
-```bash
-# Remove polaris-tools
-make clean
-
-# Remove docker volumes
-make clean-data
-
-# Stop Bingsan
-make stop-bingsan
-```
-
----
-
-## CI/CD Integration
-
-### GitHub Actions Example
-
-```yaml
-name: Benchmark
-
-on:
- push:
- branches: [main]
- pull_request:
-
-jobs:
- benchmark:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - uses: actions/setup-go@v5
- with:
- go-version: '1.23'
-
- - name: Run benchmarks
- run: |
- go test -bench=. -benchmem ./tests/benchmark/... | tee results.txt
-
- - name: Compare with baseline
- run: |
- benchstat baseline.txt results.txt
-```
-
-### Performance Gate
-
-Fail CI if performance regresses:
-
-```bash
-#!/bin/bash
-# scripts/check-perf.sh
-
-benchstat -delta-test=none baseline.txt results.txt | grep -E '\+[0-9]+\.[0-9]+%' && {
- echo "Performance regression detected!"
- exit 1
-}
-```
diff --git a/docs-site/content/docs/performance/locking.md b/docs-site/content/docs/performance/locking.md
deleted file mode 100644
index 6d527a6..0000000
--- a/docs-site/content/docs/performance/locking.md
+++ /dev/null
@@ -1,370 +0,0 @@
----
-title: "Distributed Locking"
-weight: 2
----
-
-# Distributed Locking
-
-Bingsan uses PostgreSQL row-level locking with configurable timeouts and retry logic to handle concurrent modifications safely across multiple instances.
-
-## Overview
-
-When multiple Bingsan instances modify the same resource simultaneously, locking ensures:
-
-- **Consistency**: Only one operation modifies a resource at a time
-- **Isolation**: Operations don't see partial states
-- **Automatic Recovery**: Failed locks are retried with backoff
-
-```
-Instance A ──┐ ┌── Instance B
- │ │
- ▼ ▼
- ┌──────────────────────────────────────┐
- │ PostgreSQL Lock │
- │ ┌──────────────────────────────┐ │
- │ │ SELECT ... FOR UPDATE │ │
- │ │ SET LOCAL lock_timeout │ │
- │ └──────────────────────────────┘ │
- │ │
- │ Instance A: Acquired ✓ │
- │ Instance B: Waiting... (timeout) │
- │ Retry after interval │
- └──────────────────────────────────────┘
-```
-
-## Configuration
-
-Configure locking via `config.yaml`:
-
-```yaml
-catalog:
- # Maximum time to wait for a lock
- lock_timeout: 30s
-
- # Time between retry attempts
- lock_retry_interval: 100ms
-
- # Maximum number of retries before failing
- max_lock_retries: 100
-```
-
-### Environment Variables
-
-```bash
-ICEBERG_CATALOG_LOCK_TIMEOUT=30s
-ICEBERG_CATALOG_LOCK_RETRY_INTERVAL=100ms
-ICEBERG_CATALOG_MAX_LOCK_RETRIES=100
-```
-
-## How It Works
-
-### Lock Acquisition Flow
-
-```
-┌─────────────────────────────────────────────────────────────────┐
-│ Lock Acquisition Flow │
-└─────────────────────────────────────────────────────────────────┘
-
-1. Begin Transaction
- │
- ▼
-2. SET LOCAL lock_timeout = '30s'
- │
- ▼
-3. Execute operation (SELECT ... FOR UPDATE)
- │
- ├──Success──► 4. Commit Transaction ──► Done ✓
- │
- └──Lock Timeout (55P03)
- │
- ▼
- 5. Rollback Transaction
- │
- ▼
- 6. Wait retry_interval (100ms)
- │
- ▼
- 7. Retry count < max_retries?
- │
- ├──Yes──► Go to Step 1
- │
- └──No──► Return ErrLockTimeout ✗
-```
-
-### PostgreSQL Lock Timeout
-
-Each transaction sets `lock_timeout` locally:
-
-```sql
-BEGIN;
-SET LOCAL lock_timeout = '30000ms';
-SELECT * FROM tables WHERE id = $1 FOR UPDATE;
--- ... perform update ...
-COMMIT;
-```
-
-If the lock isn't acquired within `lock_timeout`, PostgreSQL returns error code `55P03`.
-
-### Retry Logic
-
-When a lock timeout occurs:
-
-1. **Transaction is rolled back** (no partial state)
-2. **Wait for retry interval** (prevents thundering herd)
-3. **Retry up to max_retries times**
-4. **Return error if all retries exhausted**
-
-## Configuration Options
-
-| Option | Default | Description |
-|--------|---------|-------------|
-| `lock_timeout` | 30s | Max time to wait for a single lock attempt |
-| `lock_retry_interval` | 100ms | Wait time between retry attempts |
-| `max_lock_retries` | 100 | Maximum retry attempts before failing |
-
-### Total Wait Time
-
-Maximum time before failure:
-
-```
-max_wait = lock_timeout + (max_lock_retries × lock_retry_interval)
- = 30s + (100 × 100ms)
- = 30s + 10s
- = 40s
-```
-
-## Use Cases
-
-### Table Commits
-
-When committing table changes:
-
-```go
-err := db.WithLock(ctx, db.LockConfig{
- Timeout: 30 * time.Second,
- RetryInterval: 100 * time.Millisecond,
- MaxRetries: 100,
-}, func(tx pgx.Tx) error {
- // Load current table state
- table, err := loadTable(ctx, tx, tableID)
- if err != nil {
- return err
- }
-
- // Validate requirements
- if err := validateRequirements(table, req); err != nil {
- return err
- }
-
- // Apply updates
- return updateTable(ctx, tx, tableID, updates)
-})
-```
-
-### Namespace Operations
-
-When modifying namespaces:
-
-```go
-err := db.WithLock(ctx, lockConfig, func(tx pgx.Tx) error {
- return createNamespace(ctx, tx, namespace)
-})
-```
-
-## Tuning Guidelines
-
-### High Contention Workloads
-
-If many clients modify the same tables:
-
-```yaml
-catalog:
- lock_timeout: 5s # Fail faster
- lock_retry_interval: 50ms # Retry more frequently
- max_lock_retries: 200 # More retry attempts
-```
-
-### Low Contention Workloads
-
-If tables are rarely modified concurrently:
-
-```yaml
-catalog:
- lock_timeout: 60s # Wait longer
- lock_retry_interval: 500ms # Less aggressive retries
- max_lock_retries: 10 # Fewer retries
-```
-
-### Batch Processing
-
-For batch jobs that can wait:
-
-```yaml
-catalog:
- lock_timeout: 120s # Very patient
- lock_retry_interval: 1s # Conservative retries
- max_lock_retries: 60 # 2 minute total wait
-```
-
-## Error Handling
-
-### ErrLockTimeout
-
-Returned when all retries are exhausted:
-
-```go
-if errors.Is(err, db.ErrLockTimeout) {
- // Lock could not be acquired
- return fiber.NewError(
- fiber.StatusConflict,
- "table is being modified by another operation",
- )
-}
-```
-
-### Context Cancellation
-
-If the context is cancelled during retry:
-
-```go
-ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
-defer cancel()
-
-err := db.WithLock(ctx, lockConfig, func(tx pgx.Tx) error {
- // ...
-})
-
-if errors.Is(err, context.DeadlineExceeded) {
- // Request timed out before lock acquired
-}
-```
-
-### Serialization Failures
-
-PostgreSQL serialization errors (40001) are also detected:
-
-```go
-if db.IsSerializationError(err) {
- // Concurrent modification detected
- // Client should retry
-}
-```
-
-## Monitoring
-
-### Lock Wait Metrics
-
-Monitor lock contention via PostgreSQL:
-
-```sql
--- Active locks
-SELECT * FROM pg_locks WHERE NOT granted;
-
--- Lock wait statistics
-SELECT * FROM pg_stat_activity
-WHERE wait_event_type = 'Lock';
-```
-
-### Application Metrics
-
-Track lock retries in your application:
-
-```promql
-# Lock timeout errors
-rate(iceberg_handler_errors_total{error="lock_timeout"}[5m])
-
-# Lock retry rate
-rate(iceberg_lock_retries_total[5m])
-```
-
-## Best Practices
-
-### Keep Transactions Short
-
-```go
-// Good: Minimal work inside lock
-err := db.WithLock(ctx, cfg, func(tx pgx.Tx) error {
- return tx.Exec(ctx, "UPDATE tables SET ...")
-})
-
-// Bad: External calls inside lock
-err := db.WithLock(ctx, cfg, func(tx pgx.Tx) error {
- callExternalService() // May be slow!
- return tx.Exec(ctx, "UPDATE tables SET ...")
-})
-```
-
-### Use Appropriate Timeouts
-
-```go
-// For quick operations
-cfg := db.LockConfig{
- Timeout: 5 * time.Second,
- RetryInterval: 50 * time.Millisecond,
- MaxRetries: 20,
-}
-
-// For batch operations
-cfg := db.LockConfig{
- Timeout: 60 * time.Second,
- RetryInterval: time.Second,
- MaxRetries: 10,
-}
-```
-
-### Handle Errors Gracefully
-
-```go
-err := db.WithLock(ctx, cfg, fn)
-switch {
-case errors.Is(err, db.ErrLockTimeout):
- return fiber.NewError(409, "resource busy")
-case errors.Is(err, context.Canceled):
- return fiber.NewError(499, "request cancelled")
-default:
- return err
-}
-```
-
-## Troubleshooting
-
-### Frequent Lock Timeouts
-
-**Symptoms**: Many `ErrLockTimeout` errors
-
-**Causes**:
-- High write contention on same tables
-- Long-running transactions holding locks
-- Database performance issues
-
-**Solutions**:
-1. Increase `max_lock_retries`
-2. Decrease `lock_timeout` (fail faster, retry sooner)
-3. Check for slow queries holding locks
-4. Partition workloads across different tables
-
-### Slow Lock Acquisition
-
-**Symptoms**: High latency on writes
-
-**Causes**:
-- PostgreSQL connection issues
-- Lock contention
-- Slow disk I/O
-
-**Solutions**:
-1. Check PostgreSQL performance
-2. Monitor `pg_stat_activity` for blocked queries
-3. Consider read replicas for read-heavy workloads
-
-### Deadlocks
-
-**Symptoms**: Transactions waiting indefinitely
-
-**Causes**:
-- Multiple tables locked in different orders
-
-**Solutions**:
-1. Bingsan acquires locks in consistent order
-2. PostgreSQL detects and breaks deadlocks (40P01)
-3. Retry logic handles broken deadlocks automatically
diff --git a/docs-site/content/docs/performance/metrics.md b/docs-site/content/docs/performance/metrics.md
deleted file mode 100644
index 7d8da5c..0000000
--- a/docs-site/content/docs/performance/metrics.md
+++ /dev/null
@@ -1,378 +0,0 @@
----
-title: "Metrics"
-weight: 4
----
-
-# Performance Metrics
-
-Bingsan exposes performance metrics via Prometheus at the `/metrics` endpoint. This page documents all performance-related metrics for monitoring and alerting.
-
-## Pool Metrics
-
-Object pool utilization metrics help monitor memory efficiency.
-
-### bingsan_pool_gets_total
-
-**Type**: Counter
-**Labels**: `pool`
-
-Total number of `Get()` operations on the pool.
-
-```promql
-# Get rate per pool
-rate(bingsan_pool_gets_total[5m])
-
-# Total gets by pool type
-sum by (pool) (bingsan_pool_gets_total)
-```
-
-### bingsan_pool_returns_total
-
-**Type**: Counter
-**Labels**: `pool`
-
-Total number of successful `Put()` operations returning items to the pool.
-
-```promql
-# Return rate
-rate(bingsan_pool_returns_total[5m])
-
-# Pool efficiency (returns/gets)
-rate(bingsan_pool_returns_total{pool="buffer"}[5m])
-/ rate(bingsan_pool_gets_total{pool="buffer"}[5m])
-```
-
-### bingsan_pool_discards_total
-
-**Type**: Counter
-**Labels**: `pool`
-
-Total number of discarded items (oversized or invalid).
-
-```promql
-# Discard rate (should be low)
-rate(bingsan_pool_discards_total[5m])
-
-# Discard percentage
-rate(bingsan_pool_discards_total{pool="buffer"}[5m])
-/ rate(bingsan_pool_gets_total{pool="buffer"}[5m]) * 100
-```
-
-### bingsan_pool_misses_total
-
-**Type**: Counter
-**Labels**: `pool`
-
-Total number of pool misses requiring new allocations.
-
-```promql
-# Miss rate
-rate(bingsan_pool_misses_total[5m])
-
-# Hit rate (estimated)
-1 - (rate(bingsan_pool_misses_total[5m]) / rate(bingsan_pool_gets_total[5m]))
-```
-
----
-
-## Pool Labels
-
-| Label | Values | Description |
-|-------|--------|-------------|
-| `pool` | `buffer`, `bytes` | Pool type identifier |
-
----
-
-## Grafana Dashboard
-
-### Pool Utilization Panel
-
-```json
-{
- "title": "Pool Utilization",
- "type": "stat",
- "targets": [
- {
- "expr": "100 * rate(bingsan_pool_returns_total{pool=\"buffer\"}[5m]) / rate(bingsan_pool_gets_total{pool=\"buffer\"}[5m])",
- "legendFormat": "Buffer Pool"
- }
- ],
- "options": {
- "reduceOptions": {
- "values": false,
- "calcs": ["lastNotNull"]
- }
- },
- "fieldConfig": {
- "defaults": {
- "unit": "percent",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {"color": "red", "value": 0},
- {"color": "yellow", "value": 70},
- {"color": "green", "value": 90}
- ]
- }
- }
- }
-}
-```
-
-### Pool Operations Rate
-
-```json
-{
- "title": "Pool Operations",
- "type": "graph",
- "targets": [
- {
- "expr": "rate(bingsan_pool_gets_total[5m])",
- "legendFormat": "Gets - {{pool}}"
- },
- {
- "expr": "rate(bingsan_pool_returns_total[5m])",
- "legendFormat": "Returns - {{pool}}"
- },
- {
- "expr": "rate(bingsan_pool_discards_total[5m])",
- "legendFormat": "Discards - {{pool}}"
- }
- ]
-}
-```
-
-### Pool Health Dashboard
-
-Complete dashboard JSON:
-
-```json
-{
- "title": "Bingsan Pool Health",
- "panels": [
- {
- "title": "Buffer Pool Utilization",
- "gridPos": {"x": 0, "y": 0, "w": 8, "h": 6},
- "type": "gauge",
- "targets": [{
- "expr": "100 * rate(bingsan_pool_returns_total{pool=\"buffer\"}[5m]) / rate(bingsan_pool_gets_total{pool=\"buffer\"}[5m])"
- }],
- "fieldConfig": {
- "defaults": {
- "unit": "percent",
- "min": 0,
- "max": 100
- }
- }
- },
- {
- "title": "Discard Rate",
- "gridPos": {"x": 8, "y": 0, "w": 8, "h": 6},
- "type": "stat",
- "targets": [{
- "expr": "rate(bingsan_pool_discards_total{pool=\"buffer\"}[5m])"
- }],
- "fieldConfig": {
- "defaults": {
- "unit": "ops",
- "thresholds": {
- "steps": [
- {"color": "green", "value": 0},
- {"color": "yellow", "value": 5},
- {"color": "red", "value": 20}
- ]
- }
- }
- }
- },
- {
- "title": "Pool Operations Over Time",
- "gridPos": {"x": 0, "y": 6, "w": 24, "h": 10},
- "type": "timeseries",
- "targets": [
- {"expr": "rate(bingsan_pool_gets_total[5m])", "legendFormat": "Gets {{pool}}"},
- {"expr": "rate(bingsan_pool_returns_total[5m])", "legendFormat": "Returns {{pool}}"},
- {"expr": "rate(bingsan_pool_discards_total[5m])", "legendFormat": "Discards {{pool}}"}
- ]
- }
- ]
-}
-```
-
----
-
-## Alerting Rules
-
-### Low Pool Hit Rate
-
-```yaml
-groups:
- - name: bingsan_pool
- rules:
- - alert: LowPoolHitRate
- expr: |
- (rate(bingsan_pool_returns_total{pool="buffer"}[5m])
- / rate(bingsan_pool_gets_total{pool="buffer"}[5m])) < 0.8
- for: 5m
- labels:
- severity: warning
- annotations:
- summary: "Pool hit rate below 80%"
- description: |
- Buffer pool hit rate is {{ $value | humanizePercentage }}.
- Consider checking for buffer leaks or memory pressure.
-```
-
-### High Discard Rate
-
-```yaml
- - alert: HighPoolDiscardRate
- expr: rate(bingsan_pool_discards_total{pool="buffer"}[5m]) > 10
- for: 5m
- labels:
- severity: warning
- annotations:
- summary: "High pool discard rate"
- description: |
- Discarding {{ $value | printf "%.1f" }} buffers/sec.
- Large responses may be impacting pool efficiency.
- Consider increasing MaxBufferSize.
-```
-
-### Pool Miss Spike
-
-```yaml
- - alert: PoolMissSpike
- expr: |
- rate(bingsan_pool_misses_total[5m])
- / rate(bingsan_pool_gets_total[5m]) > 0.3
- for: 2m
- labels:
- severity: info
- annotations:
- summary: "Pool miss rate elevated"
- description: |
- {{ $value | humanizePercentage }} of pool gets are misses.
- This may indicate increased load or pool pressure.
-```
-
----
-
-## Recording Rules
-
-Pre-compute expensive queries:
-
-```yaml
-groups:
- - name: bingsan_pool_recording
- interval: 30s
- rules:
- - record: bingsan:pool_hit_rate:5m
- expr: |
- rate(bingsan_pool_returns_total[5m])
- / rate(bingsan_pool_gets_total[5m])
-
- - record: bingsan:pool_discard_rate:5m
- expr: rate(bingsan_pool_discards_total[5m])
-
- - record: bingsan:pool_miss_rate:5m
- expr: |
- rate(bingsan_pool_misses_total[5m])
- / rate(bingsan_pool_gets_total[5m])
-```
-
-Use in alerts:
-
-```yaml
-- alert: LowPoolHitRate
- expr: bingsan:pool_hit_rate:5m{pool="buffer"} < 0.8
-```
-
----
-
-## Interpreting Metrics
-
-### Healthy Pool
-
-```
-gets_total: 1,000,000
-returns_total: 1,000,000
-discards_total: 50
-misses_total: 100
-
-Utilization: 100% (returns = gets)
-Discard rate: 0.005%
-Miss rate: 0.01%
-```
-
-### Pool with Leaks
-
-```
-gets_total: 1,000,000
-returns_total: 800,000 ← 200,000 missing!
-discards_total: 100
-misses_total: 200,100 ← High misses
-
-Utilization: 80%
-Miss rate: 20%
-```
-
-**Action**: Check for missing `defer pool.Put()` calls.
-
-### Pool with Large Responses
-
-```
-gets_total: 1,000,000
-returns_total: 700,000
-discards_total: 300,000 ← High discards!
-misses_total: 50
-
-Utilization: 70%
-Discard rate: 30%
-```
-
-**Action**: Consider increasing `MaxBufferSize` if schemas are large.
-
----
-
-## Health Check Endpoints
-
-### /health
-
-Basic health check (returns 200 if healthy):
-
-```bash
-curl http://localhost:8181/health
-```
-
-### /metrics
-
-Prometheus metrics endpoint:
-
-```bash
-curl http://localhost:8181/metrics | grep bingsan_pool
-```
-
-Example output:
-
-```
-# HELP bingsan_pool_gets_total Total number of pool Get() calls
-# TYPE bingsan_pool_gets_total counter
-bingsan_pool_gets_total{pool="buffer"} 15234
-bingsan_pool_gets_total{pool="bytes"} 8421
-
-# HELP bingsan_pool_returns_total Total number of pool Put() calls
-# TYPE bingsan_pool_returns_total counter
-bingsan_pool_returns_total{pool="buffer"} 15234
-bingsan_pool_returns_total{pool="bytes"} 8421
-
-# HELP bingsan_pool_discards_total Total number of discarded pool items
-# TYPE bingsan_pool_discards_total counter
-bingsan_pool_discards_total{pool="buffer"} 12
-bingsan_pool_discards_total{pool="bytes"} 0
-
-# HELP bingsan_pool_misses_total Total number of pool misses
-# TYPE bingsan_pool_misses_total counter
-bingsan_pool_misses_total{pool="buffer"} 5
-bingsan_pool_misses_total{pool="bytes"} 3
-```
diff --git a/docs-site/content/docs/performance/pooling.md b/docs-site/content/docs/performance/pooling.md
deleted file mode 100644
index fba8a13..0000000
--- a/docs-site/content/docs/performance/pooling.md
+++ /dev/null
@@ -1,262 +0,0 @@
----
-title: "Object Pooling"
-weight: 1
----
-
-# Object Pooling
-
-Bingsan uses `sync.Pool` from Go's standard library to reduce memory allocation pressure in hot paths. This significantly reduces garbage collection overhead and improves response latency consistency.
-
-## Overview
-
-Two types of pools are implemented:
-
-| Pool | Purpose | Default Size | Max Size |
-|------|---------|--------------|----------|
-| **BufferPool** | JSON serialization buffers | 4 KB | 64 KB |
-| **BytePool** | OAuth token generation | 32 bytes | 32 bytes |
-
-## How It Works
-
-### BufferPool
-
-The `BufferPool` provides reusable `bytes.Buffer` instances for JSON serialization:
-
-```
-Request 1 ──► Get buffer ──► Serialize JSON ──► Return buffer ──► Pool
- │ ▲
- └──────────────────────────────────┘
- Reused
-
-Request 2 ──► Get buffer (same one!) ──► Serialize JSON ──► Return buffer
-```
-
-**Key characteristics:**
-
-- Initial capacity: 4 KB (typical JSON metadata size)
-- Maximum size: 64 KB (oversized buffers are discarded)
-- Thread-safe via `sync.Pool`
-- Automatic reset on get
-
-### BytePool
-
-The `BytePool` provides fixed-size byte slices for token generation:
-
-- Fixed size: 32 bytes (64 hex characters when encoded)
-- Used for OAuth access token generation
-- Contents not zeroed on reuse (overwritten anyway)
-
-## Usage Patterns
-
-### In API Handlers
-
-Buffers are acquired and released within request handlers:
-
-```go
-func (h *Handler) GetTable(ctx *fiber.Ctx) error {
- // Get buffer from pool
- buf := pool.GetBuffer()
- defer pool.PutBuffer(buf) // Always return!
-
- // Use buffer for serialization
- encoder := json.NewEncoder(buf)
- if err := encoder.Encode(table); err != nil {
- return err
- }
-
- return ctx.Send(buf.Bytes())
-}
-```
-
-### With Metrics
-
-For observability, use the metrics-enabled pool:
-
-```go
-metrics := pool.NewPoolMetrics()
-bufferPool := pool.NewBufferPool(metrics)
-
-buf := bufferPool.Get()
-defer bufferPool.Put(buf)
-
-// Check stats
-stats := metrics.Stats()
-fmt.Printf("Hit rate: %.2f%%\n", stats.HitRate()*100)
-```
-
-## Configuration
-
-Pool behavior is configured via constants (compile-time):
-
-| Constant | Value | Description |
-|----------|-------|-------------|
-| `DefaultBufferSize` | 4096 | Initial buffer capacity in bytes |
-| `MaxBufferSize` | 65536 | Maximum buffer size before discard |
-| `TokenSize` | 32 | Fixed size for token byte slices |
-
-### Tuning via Custom Build
-
-To customize pool sizes, modify `internal/pool/buffer.go`:
-
-```go
-const (
- DefaultBufferSize = 8192 // Increase for larger schemas
- MaxBufferSize = 131072 // Increase for very large metadata
-)
-```
-
-## Memory Management
-
-### Buffer Lifecycle
-
-```
-┌──────────────────────────────────────────────────────────────────┐
-│ Buffer Lifecycle │
-└──────────────────────────────────────────────────────────────────┘
-
-1. Get() called
- │
- ▼
-┌─────────────────────┐
-│ Pool has buffer? │──No──► Create new buffer (4KB)
-└─────────────────────┘ │
- │ Yes │
- ▼ ▼
-┌─────────────────────┐ ┌─────────────────────┐
-│ Return from pool │ │ Grow to initial cap │
-└─────────────────────┘ └─────────────────────┘
- │ │
- └──────────────┬─────────────┘
- │
- ▼
- ┌─────────────────────┐
- │ Reset buffer │
- │ (clear contents) │
- └─────────────────────┘
- │
- ▼
- ┌─────────────────────┐
- │ Use for operation │
- │ (may grow buffer) │
- └─────────────────────┘
- │
- ▼
- ┌─────────────────────┐
- │ Put() called │
- └─────────────────────┘
- │
- ▼
- ┌─────────────────────┐
- │ Size > 64KB? │──Yes──► Discard (GC reclaims)
- └─────────────────────┘
- │ No
- ▼
- ┌─────────────────────┐
- │ Return to pool │
- └─────────────────────┘
-```
-
-### Why Discard Oversized Buffers
-
-Large buffers are discarded to prevent memory bloat:
-
-1. **Occasional large responses** don't permanently increase pool memory
-2. **Memory stays bounded** even with variable workloads
-3. **GC can reclaim** memory from large temporary allocations
-
-## Best Practices
-
-### Always Use `defer`
-
-```go
-buf := pool.GetBuffer()
-defer pool.PutBuffer(buf) // Guaranteed return
-```
-
-### Don't Hold References
-
-```go
-// Wrong: Reference escapes
-data := buf.Bytes()
-pool.PutBuffer(buf)
-return data // data is now invalid!
-
-// Correct: Copy if needed
-data := make([]byte, buf.Len())
-copy(data, buf.Bytes())
-pool.PutBuffer(buf)
-return data
-```
-
-### Don't Use After Put
-
-```go
-pool.PutBuffer(buf)
-buf.Write([]byte("data")) // Wrong: buf may be reused!
-```
-
-## Metrics
-
-Pool performance is exposed via Prometheus:
-
-| Metric | Type | Description |
-|--------|------|-------------|
-| `bingsan_pool_gets_total` | Counter | Total Get() operations |
-| `bingsan_pool_returns_total` | Counter | Total Put() operations |
-| `bingsan_pool_discards_total` | Counter | Oversized items discarded |
-| `bingsan_pool_misses_total` | Counter | New allocations (pool empty) |
-
-### Example Queries
-
-**Pool utilization rate:**
-```promql
-rate(bingsan_pool_returns_total{pool="buffer"}[5m])
-/ rate(bingsan_pool_gets_total{pool="buffer"}[5m])
-```
-
-**Discard rate (should be low):**
-```promql
-rate(bingsan_pool_discards_total{pool="buffer"}[5m])
-```
-
-## Benchmarks
-
-Run pool benchmarks:
-
-```bash
-go test -bench=BenchmarkPool -benchmem ./tests/benchmark/...
-```
-
-Expected results:
-
-| Benchmark | Time | Allocs |
-|-----------|------|--------|
-| BufferPool.Get/Put | ~50ns | 0 |
-| BufferPool.Concurrent | ~100ns | 0 |
-| BytePool.Get/Put | ~30ns | 0 |
-
-## Troubleshooting
-
-### High Discard Rate
-
-If `bingsan_pool_discards_total` is increasing rapidly:
-
-1. **Cause**: Many large responses exceeding 64KB
-2. **Impact**: Reduced pool effectiveness
-3. **Solution**: Consider increasing `MaxBufferSize` for schemas with 100+ columns
-
-### Pool Not Reducing Allocations
-
-If memory allocations aren't decreasing:
-
-1. **Check**: Handlers use `pool.GetBuffer()` / `pool.PutBuffer()`
-2. **Check**: All code paths call `Put()` (including error paths)
-3. **Check**: `defer` is used to ensure returns
-
-### Memory Growing Over Time
-
-If heap size keeps increasing:
-
-1. **Check**: `MaxBufferSize` isn't too high
-2. **Check**: No buffer references escaping
-3. **Profile**: Use `go tool pprof` to identify leaks
diff --git a/docs-site/content/docs/performance/tuning.md b/docs-site/content/docs/performance/tuning.md
deleted file mode 100644
index b93eb60..0000000
--- a/docs-site/content/docs/performance/tuning.md
+++ /dev/null
@@ -1,407 +0,0 @@
----
-title: "Tuning"
-weight: 5
----
-
-# Performance Tuning
-
-This guide covers how to tune Bingsan for optimal performance based on your workload characteristics.
-
-## Quick Reference
-
-| Workload | Lock Timeout | Retry Interval | Max Retries | Buffer Size |
-|----------|--------------|----------------|-------------|-------------|
-| Low latency | 5s | 50ms | 20 | 4KB |
-| High throughput | 30s | 100ms | 100 | 4KB |
-| Large schemas | 30s | 100ms | 100 | 8-16KB |
-| Batch processing | 120s | 1s | 60 | 4KB |
-
----
-
-## Workload Profiles
-
-### Low Latency
-
-Prioritize fast responses over throughput:
-
-```yaml
-catalog:
- lock_timeout: 5s
- lock_retry_interval: 50ms
- max_lock_retries: 20
-
-server:
- read_timeout: 10s
- write_timeout: 10s
-
-database:
- max_open_conns: 50
- max_idle_conns: 25
-```
-
-**Characteristics:**
-- Fails fast on lock contention
-- More aggressive retries
-- Higher connection pool
-
-### High Throughput
-
-Maximize requests per second:
-
-```yaml
-catalog:
- lock_timeout: 30s
- lock_retry_interval: 100ms
- max_lock_retries: 100
-
-server:
- read_timeout: 60s
- write_timeout: 60s
- idle_timeout: 300s
-
-database:
- max_open_conns: 100
- max_idle_conns: 50
- conn_max_lifetime: 30m
-```
-
-**Characteristics:**
-- Patient lock acquisition
-- Long-lived connections
-- Higher resource usage
-
-### Large Schemas
-
-For tables with 100+ columns:
-
-```yaml
-catalog:
- lock_timeout: 30s
- lock_retry_interval: 100ms
- max_lock_retries: 100
-
-# Compile-time constants in internal/pool/buffer.go:
-# DefaultBufferSize = 8192 (8KB)
-# MaxBufferSize = 131072 (128KB)
-```
-
-**Characteristics:**
-- Larger initial buffers
-- Higher max buffer size
-- Reduced buffer discards
-
-### Batch Processing
-
-For bulk operations:
-
-```yaml
-catalog:
- lock_timeout: 120s
- lock_retry_interval: 1s
- max_lock_retries: 60
-
-server:
- read_timeout: 300s
- write_timeout: 300s
-
-database:
- max_open_conns: 25
- conn_max_lifetime: 60m
-```
-
-**Characteristics:**
-- Very patient operations
-- Conservative resources
-- Long timeouts
-
----
-
-## Tuning by Symptom
-
-### High Latency
-
-**Symptoms:**
-- p99 latency > 100ms
-- Slow table loads
-- Client timeouts
-
-**Diagnosis:**
-```promql
-# Check lock wait time
-rate(iceberg_db_wait_duration_seconds_total[5m])
-
-# Check pool discard rate
-rate(bingsan_pool_discards_total[5m])
-
-# Check connection saturation
-iceberg_db_connections_in_use / iceberg_db_connections_max
-```
-
-**Solutions:**
-
-1. **Lock contention** - Reduce `lock_timeout`, increase `max_lock_retries`
-2. **Pool discards** - Increase `MaxBufferSize` for large schemas
-3. **Connection pool** - Increase `max_open_conns`
-
-### High Memory Usage
-
-**Symptoms:**
-- Memory growth over time
-- OOM kills
-- High GC pressure
-
-**Diagnosis:**
-```bash
-# Heap profile
-curl http://localhost:8181/debug/pprof/heap > heap.prof
-go tool pprof heap.prof
-
-# Check pool stats
-curl http://localhost:8181/metrics | grep bingsan_pool
-```
-
-**Solutions:**
-
-1. **Buffer leaks** - Check all code paths return buffers
-2. **Large buffers** - Reduce `MaxBufferSize`
-3. **Connection bloat** - Reduce `max_open_conns`
-
-### Lock Timeout Errors
-
-**Symptoms:**
-- `ErrLockTimeout` errors
-- 409 Conflict responses
-- Failed commits
-
-**Diagnosis:**
-```sql
--- Check active locks
-SELECT * FROM pg_locks WHERE NOT granted;
-
--- Check blocking queries
-SELECT * FROM pg_stat_activity
-WHERE wait_event_type = 'Lock';
-```
-
-**Solutions:**
-
-1. **High contention** - Increase `max_lock_retries`
-2. **Slow transactions** - Keep transactions short
-3. **Deadlocks** - Bingsan handles these automatically
-
-### Connection Exhaustion
-
-**Symptoms:**
-- `too many connections` errors
-- Connection timeouts
-- Slow query start
-
-**Diagnosis:**
-```promql
-# Connection utilization
-iceberg_db_connections_in_use / iceberg_db_connections_max > 0.9
-```
-
-**Solutions:**
-
-1. **Increase pool** - Raise `max_open_conns`
-2. **Add PgBouncer** - Connection multiplexing
-3. **Reduce instances** - Fewer Bingsan replicas
-
----
-
-## Database Tuning
-
-### PostgreSQL Settings
-
-```sql
--- Increase max connections
-ALTER SYSTEM SET max_connections = 500;
-
--- Lock timeout (server-wide default)
-ALTER SYSTEM SET lock_timeout = '30s';
-
--- Statement timeout
-ALTER SYSTEM SET statement_timeout = '60s';
-
--- Effective cache size (75% of RAM)
-ALTER SYSTEM SET effective_cache_size = '12GB';
-
--- Shared buffers (25% of RAM)
-ALTER SYSTEM SET shared_buffers = '4GB';
-
--- Work memory
-ALTER SYSTEM SET work_mem = '256MB';
-```
-
-### Index Optimization
-
-Ensure indexes exist for common queries:
-
-```sql
--- Verify indexes
-\di iceberg_*
-
--- Analyze tables
-ANALYZE VERBOSE;
-
--- Check index usage
-SELECT schemaname, tablename, indexname, idx_scan
-FROM pg_stat_user_indexes
-ORDER BY idx_scan DESC;
-```
-
-### Connection Pooling with PgBouncer
-
-For many Bingsan instances:
-
-```ini
-# pgbouncer.ini
-[databases]
-iceberg_catalog = host=postgres port=5432 dbname=iceberg_catalog
-
-[pgbouncer]
-pool_mode = transaction
-max_client_conn = 1000
-default_pool_size = 50
-reserve_pool_size = 10
-reserve_pool_timeout = 3
-```
-
----
-
-## Resource Sizing
-
-### Memory
-
-```
-memory_per_instance = base + (concurrent_requests × request_memory)
- ≈ 50MB + (500 × 100KB)
- ≈ 100MB typical
- ≈ 200MB peak
-```
-
-### CPU
-
-```
-cpu_per_instance ≈ 0.2 cores idle
- ≈ 1 core under load
-```
-
-### Instances
-
-```
-instances = (peak_rps / rps_per_instance) × 1.5
- = (10000 / 5000) × 1.5
- = 3 instances minimum
-```
-
----
-
-## Profiling
-
-### CPU Profile
-
-```bash
-# Start profiling
-curl http://localhost:8181/debug/pprof/profile?seconds=30 > cpu.prof
-
-# Analyze
-go tool pprof -http=:8080 cpu.prof
-```
-
-### Memory Profile
-
-```bash
-# Heap snapshot
-curl http://localhost:8181/debug/pprof/heap > heap.prof
-
-# Analyze
-go tool pprof -http=:8080 heap.prof
-```
-
-### Trace
-
-```bash
-# Capture trace
-curl http://localhost:8181/debug/pprof/trace?seconds=5 > trace.out
-
-# Analyze
-go tool trace trace.out
-```
-
-### Goroutine Analysis
-
-```bash
-# Goroutine dump
-curl http://localhost:8181/debug/pprof/goroutine > goroutine.prof
-
-# Analyze
-go tool pprof goroutine.prof
-```
-
----
-
-## Benchmark-Driven Tuning
-
-### Step 1: Establish Baseline
-
-```bash
-go test -bench=. -benchmem ./tests/benchmark/... | tee baseline.txt
-```
-
-### Step 2: Identify Bottlenecks
-
-```bash
-go test -bench=BenchmarkTable -cpuprofile=cpu.prof ./tests/benchmark/...
-go tool pprof -top cpu.prof
-```
-
-### Step 3: Make Changes
-
-Adjust configuration or code based on profile results.
-
-### Step 4: Measure Impact
-
-```bash
-go test -bench=. -benchmem ./tests/benchmark/... | tee optimized.txt
-benchstat baseline.txt optimized.txt
-```
-
-### Step 5: Validate in Production
-
-```bash
-# Load test
-cd benchmarks
-make read-benchmark
-
-# Monitor metrics
-watch -n 1 'curl -s localhost:8181/metrics | grep bingsan_pool'
-```
-
----
-
-## Checklist
-
-### Pre-Production
-
-- [ ] Set appropriate `lock_timeout` for your workload
-- [ ] Configure `max_open_conns` based on expected load
-- [ ] Enable Prometheus metrics collection
-- [ ] Set up alerting for pool health
-- [ ] Run load tests with realistic data
-
-### Production Monitoring
-
-- [ ] Pool hit rate > 80%
-- [ ] Pool discard rate < 1%
-- [ ] Lock timeout rate < 1%
-- [ ] Connection utilization < 90%
-- [ ] GC pause p99 < 10ms
-
-### Troubleshooting Resources
-
-- [Object Pooling]({{< relref "/docs/performance/pooling" >}}) - Buffer pool details
-- [Distributed Locking]({{< relref "/docs/performance/locking" >}}) - Lock configuration
-- [Metrics]({{< relref "/docs/performance/metrics" >}}) - Monitoring setup
-- [Benchmarking]({{< relref "/docs/performance/benchmarking" >}}) - Load testing
diff --git a/docs-site/hugo.toml b/docs-site/hugo.toml
deleted file mode 100644
index da38c96..0000000
--- a/docs-site/hugo.toml
+++ /dev/null
@@ -1,40 +0,0 @@
-baseURL = 'https://bingsan.dev/'
-languageCode = 'en-us'
-title = 'Bingsan Documentation'
-theme = 'hugo-book'
-
-# Book configuration
-disablePathToLower = true
-enableGitInfo = false
-
-# Needed for menu
-[menu]
-[[menu.after]]
- name = "GitHub"
- url = "https://github.com/kimuyb/bingsan"
- weight = 10
-
-[params]
- # (Optional, default light) Sets color theme: light, dark or auto.
- BookTheme = 'auto'
- # (Optional, default true) Controls table of contents visibility on right side of pages.
- BookToC = true
- # (Optional, default docs) Specify root page to render child pages as menu.
- BookSection = 'docs'
- # Set source repository location.
- BookRepo = 'https://github.com/kimuyb/bingsan'
- # Enable "Edit this page" links for 'doc' page type.
- BookEditPath = 'edit/main/docs-site/content'
- # Breadcrumbs
- BookBreadcrumb = true
-
-[markup]
- [markup.highlight]
- noClasses = false
- style = 'monokai'
- [markup.goldmark]
- [markup.goldmark.renderer]
- unsafe = true
-
-# Multi-lingual mode config
-defaultContentLanguage = 'en'
diff --git a/docs-site/lib/i18n.ts b/docs-site/lib/i18n.ts
new file mode 100644
index 0000000..b5ce1df
--- /dev/null
+++ b/docs-site/lib/i18n.ts
@@ -0,0 +1,14 @@
+import type { I18nConfig } from 'fumadocs-core/i18n';
+
+export const i18n: I18nConfig = {
+ defaultLanguage: 'en',
+ languages: ['en', 'ko'],
+ parser: 'dir',
+};
+
+export type Locale = (typeof i18n)['languages'][number];
+
+export const localeNames: Record<Locale, string> = {
+ en: 'English',
+ ko: '한국어',
+};
diff --git a/docs-site/lib/source.ts b/docs-site/lib/source.ts
new file mode 100644
index 0000000..bec1e4c
--- /dev/null
+++ b/docs-site/lib/source.ts
@@ -0,0 +1,9 @@
+import { docs } from 'fumadocs-mdx:collections/server';
+import { loader } from 'fumadocs-core/source';
+import { i18n } from '@/lib/i18n';
+
+export const source = loader({
+ baseUrl: '/docs',
+ source: docs.toFumadocsSource(),
+ i18n,
+});
diff --git a/docs-site/next-env.d.ts b/docs-site/next-env.d.ts
new file mode 100644
index 0000000..c4b7818
--- /dev/null
+++ b/docs-site/next-env.d.ts
@@ -0,0 +1,6 @@
+/// <reference types="next" />
+/// <reference types="next/image-types/global" />
+import "./.next/dev/types/routes.d.ts";
+
+// NOTE: This file should not be edited
+// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
diff --git a/docs-site/next.config.mjs b/docs-site/next.config.mjs
new file mode 100644
index 0000000..d5ac07c
--- /dev/null
+++ b/docs-site/next.config.mjs
@@ -0,0 +1,13 @@
+import { createMDX } from 'fumadocs-mdx/next';
+
+const withMDX = createMDX();
+
+/** @type {import('next').NextConfig} */
+const config = {
+ output: process.env.NODE_ENV === 'production' ? 'export' : undefined,
+ images: { unoptimized: true },
+ basePath: process.env.NEXT_PUBLIC_BASE_PATH || '',
+ trailingSlash: true,
+};
+
+export default withMDX(config);
diff --git a/docs-site/package.json b/docs-site/package.json
new file mode 100644
index 0000000..eed357a
--- /dev/null
+++ b/docs-site/package.json
@@ -0,0 +1,31 @@
+{
+ "name": "bingsan-docs",
+ "version": "1.0.0",
+ "description": "Bingsan Apache Iceberg REST Catalog Documentation",
+ "private": true,
+ "scripts": {
+ "dev": "next dev",
+ "build": "next build",
+ "start": "next start",
+ "lint": "next lint"
+ },
+ "dependencies": {
+ "fumadocs-core": "^16.4.7",
+ "fumadocs-mdx": "^14.2.5",
+ "fumadocs-ui": "^16.4.7",
+ "next": "^16.1.1",
+ "react": "^19.2.3",
+ "react-dom": "^19.2.3"
+ },
+ "devDependencies": {
+ "@tailwindcss/postcss": "^4.1.18",
+ "@types/mdx": "^2.0.13",
+ "@types/node": "^22.0.0",
+ "@types/react": "^19.0.0",
+ "@types/react-dom": "^19.0.0",
+ "autoprefixer": "^10.4.21",
+ "postcss": "^8.5.4",
+ "tailwindcss": "^4.0.0",
+ "typescript": "^5.7.3"
+ }
+}
diff --git a/docs-site/pnpm-lock.yaml b/docs-site/pnpm-lock.yaml
new file mode 100644
index 0000000..da340ff
--- /dev/null
+++ b/docs-site/pnpm-lock.yaml
@@ -0,0 +1,3962 @@
+lockfileVersion: '9.0'
+
+settings:
+ autoInstallPeers: true
+ excludeLinksFromLockfile: false
+
+importers:
+
+ .:
+ dependencies:
+ fumadocs-core:
+ specifier: ^16.4.7
+ version: 16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5)
+ fumadocs-mdx:
+ specifier: ^14.2.5
+ version: 14.2.5(@types/react@19.2.8)(fumadocs-core@16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3)
+ fumadocs-ui:
+ specifier: ^16.4.7
+ version: 16.4.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(fumadocs-core@16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(tailwindcss@4.1.18)
+ next:
+ specifier: ^16.1.1
+ version: 16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ react:
+ specifier: ^19.2.3
+ version: 19.2.3
+ react-dom:
+ specifier: ^19.2.3
+ version: 19.2.3(react@19.2.3)
+ devDependencies:
+ '@tailwindcss/postcss':
+ specifier: ^4.1.18
+ version: 4.1.18
+ '@types/mdx':
+ specifier: ^2.0.13
+ version: 2.0.13
+ '@types/node':
+ specifier: ^22.0.0
+ version: 22.19.6
+ '@types/react':
+ specifier: ^19.0.0
+ version: 19.2.8
+ '@types/react-dom':
+ specifier: ^19.0.0
+ version: 19.2.3(@types/react@19.2.8)
+ autoprefixer:
+ specifier: ^10.4.21
+ version: 10.4.23(postcss@8.5.6)
+ postcss:
+ specifier: ^8.5.4
+ version: 8.5.6
+ tailwindcss:
+ specifier: ^4.0.0
+ version: 4.1.18
+ typescript:
+ specifier: ^5.7.3
+ version: 5.9.3
+
+packages:
+
+ '@alloc/quick-lru@5.2.0':
+ resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
+ engines: {node: '>=10'}
+
+ '@emnapi/runtime@1.8.1':
+ resolution: {integrity: sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==}
+
+ '@esbuild/aix-ppc64@0.27.2':
+ resolution: {integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==}
+ engines: {node: '>=18'}
+ cpu: [ppc64]
+ os: [aix]
+
+ '@esbuild/android-arm64@0.27.2':
+ resolution: {integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [android]
+
+ '@esbuild/android-arm@0.27.2':
+ resolution: {integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==}
+ engines: {node: '>=18'}
+ cpu: [arm]
+ os: [android]
+
+ '@esbuild/android-x64@0.27.2':
+ resolution: {integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [android]
+
+ '@esbuild/darwin-arm64@0.27.2':
+ resolution: {integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@esbuild/darwin-x64@0.27.2':
+ resolution: {integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [darwin]
+
+ '@esbuild/freebsd-arm64@0.27.2':
+ resolution: {integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [freebsd]
+
+ '@esbuild/freebsd-x64@0.27.2':
+ resolution: {integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [freebsd]
+
+ '@esbuild/linux-arm64@0.27.2':
+ resolution: {integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [linux]
+
+ '@esbuild/linux-arm@0.27.2':
+ resolution: {integrity: sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==}
+ engines: {node: '>=18'}
+ cpu: [arm]
+ os: [linux]
+
+ '@esbuild/linux-ia32@0.27.2':
+ resolution: {integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==}
+ engines: {node: '>=18'}
+ cpu: [ia32]
+ os: [linux]
+
+ '@esbuild/linux-loong64@0.27.2':
+ resolution: {integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==}
+ engines: {node: '>=18'}
+ cpu: [loong64]
+ os: [linux]
+
+ '@esbuild/linux-mips64el@0.27.2':
+ resolution: {integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==}
+ engines: {node: '>=18'}
+ cpu: [mips64el]
+ os: [linux]
+
+ '@esbuild/linux-ppc64@0.27.2':
+ resolution: {integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==}
+ engines: {node: '>=18'}
+ cpu: [ppc64]
+ os: [linux]
+
+ '@esbuild/linux-riscv64@0.27.2':
+ resolution: {integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==}
+ engines: {node: '>=18'}
+ cpu: [riscv64]
+ os: [linux]
+
+ '@esbuild/linux-s390x@0.27.2':
+ resolution: {integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==}
+ engines: {node: '>=18'}
+ cpu: [s390x]
+ os: [linux]
+
+ '@esbuild/linux-x64@0.27.2':
+ resolution: {integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [linux]
+
+ '@esbuild/netbsd-arm64@0.27.2':
+ resolution: {integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [netbsd]
+
+ '@esbuild/netbsd-x64@0.27.2':
+ resolution: {integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [netbsd]
+
+ '@esbuild/openbsd-arm64@0.27.2':
+ resolution: {integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [openbsd]
+
+ '@esbuild/openbsd-x64@0.27.2':
+ resolution: {integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [openbsd]
+
+ '@esbuild/openharmony-arm64@0.27.2':
+ resolution: {integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [openharmony]
+
+ '@esbuild/sunos-x64@0.27.2':
+ resolution: {integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [sunos]
+
+ '@esbuild/win32-arm64@0.27.2':
+ resolution: {integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==}
+ engines: {node: '>=18'}
+ cpu: [arm64]
+ os: [win32]
+
+ '@esbuild/win32-ia32@0.27.2':
+ resolution: {integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==}
+ engines: {node: '>=18'}
+ cpu: [ia32]
+ os: [win32]
+
+ '@esbuild/win32-x64@0.27.2':
+ resolution: {integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==}
+ engines: {node: '>=18'}
+ cpu: [x64]
+ os: [win32]
+
+ '@floating-ui/core@1.7.3':
+ resolution: {integrity: sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==}
+
+ '@floating-ui/dom@1.7.4':
+ resolution: {integrity: sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==}
+
+ '@floating-ui/react-dom@2.1.6':
+ resolution: {integrity: sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==}
+ peerDependencies:
+ react: '>=16.8.0'
+ react-dom: '>=16.8.0'
+
+ '@floating-ui/utils@0.2.10':
+ resolution: {integrity: sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==}
+
+ '@formatjs/fast-memoize@3.0.3':
+ resolution: {integrity: sha512-CArYtQKGLAOruCMeq5/RxCg6vUXFx3OuKBdTm30Wn/+gCefehmZ8Y2xSMxMrO2iel7hRyE3HKfV56t3vAU6D4Q==}
+
+ '@formatjs/intl-localematcher@0.7.5':
+ resolution: {integrity: sha512-7/nd90cn5CT7SVF71/ybUKAcnvBlr9nZlJJp8O8xIZHXFgYOC4SXExZlSdgHv2l6utjw1byidL06QzChvQMHwA==}
+
+ '@fumadocs/ui@16.4.7':
+ resolution: {integrity: sha512-NnkMIN5BzBRh2OzA9rp2SgbGEkEwfCfq0sE4vq2n+GkIDIggicGYUNgSl2gtIBQsKYKP/a4/0wrkQKdq4eUJlw==}
+ peerDependencies:
+ '@types/react': '*'
+ fumadocs-core: 16.4.7
+ next: 16.x.x
+ react: ^19.2.0
+ react-dom: ^19.2.0
+ tailwindcss: ^4.0.0
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ next:
+ optional: true
+ tailwindcss:
+ optional: true
+
+ '@img/colour@1.0.0':
+ resolution: {integrity: sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==}
+ engines: {node: '>=18'}
+
+ '@img/sharp-darwin-arm64@0.34.5':
+ resolution: {integrity: sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@img/sharp-darwin-x64@0.34.5':
+ resolution: {integrity: sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [x64]
+ os: [darwin]
+
+ '@img/sharp-libvips-darwin-arm64@1.2.4':
+ resolution: {integrity: sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@img/sharp-libvips-darwin-x64@1.2.4':
+ resolution: {integrity: sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==}
+ cpu: [x64]
+ os: [darwin]
+
+ '@img/sharp-libvips-linux-arm64@1.2.4':
+ resolution: {integrity: sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==}
+ cpu: [arm64]
+ os: [linux]
+
+ '@img/sharp-libvips-linux-arm@1.2.4':
+ resolution: {integrity: sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==}
+ cpu: [arm]
+ os: [linux]
+
+ '@img/sharp-libvips-linux-ppc64@1.2.4':
+ resolution: {integrity: sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==}
+ cpu: [ppc64]
+ os: [linux]
+
+ '@img/sharp-libvips-linux-riscv64@1.2.4':
+ resolution: {integrity: sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==}
+ cpu: [riscv64]
+ os: [linux]
+
+ '@img/sharp-libvips-linux-s390x@1.2.4':
+ resolution: {integrity: sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==}
+ cpu: [s390x]
+ os: [linux]
+
+ '@img/sharp-libvips-linux-x64@1.2.4':
+ resolution: {integrity: sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==}
+ cpu: [x64]
+ os: [linux]
+
+ '@img/sharp-libvips-linuxmusl-arm64@1.2.4':
+ resolution: {integrity: sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==}
+ cpu: [arm64]
+ os: [linux]
+
+ '@img/sharp-libvips-linuxmusl-x64@1.2.4':
+ resolution: {integrity: sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==}
+ cpu: [x64]
+ os: [linux]
+
+ '@img/sharp-linux-arm64@0.34.5':
+ resolution: {integrity: sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [arm64]
+ os: [linux]
+
+ '@img/sharp-linux-arm@0.34.5':
+ resolution: {integrity: sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [arm]
+ os: [linux]
+
+ '@img/sharp-linux-ppc64@0.34.5':
+ resolution: {integrity: sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [ppc64]
+ os: [linux]
+
+ '@img/sharp-linux-riscv64@0.34.5':
+ resolution: {integrity: sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [riscv64]
+ os: [linux]
+
+ '@img/sharp-linux-s390x@0.34.5':
+ resolution: {integrity: sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [s390x]
+ os: [linux]
+
+ '@img/sharp-linux-x64@0.34.5':
+ resolution: {integrity: sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [x64]
+ os: [linux]
+
+ '@img/sharp-linuxmusl-arm64@0.34.5':
+ resolution: {integrity: sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [arm64]
+ os: [linux]
+
+ '@img/sharp-linuxmusl-x64@0.34.5':
+ resolution: {integrity: sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [x64]
+ os: [linux]
+
+ '@img/sharp-wasm32@0.34.5':
+ resolution: {integrity: sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [wasm32]
+
+ '@img/sharp-win32-arm64@0.34.5':
+ resolution: {integrity: sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [arm64]
+ os: [win32]
+
+ '@img/sharp-win32-ia32@0.34.5':
+ resolution: {integrity: sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [ia32]
+ os: [win32]
+
+ '@img/sharp-win32-x64@0.34.5':
+ resolution: {integrity: sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+ cpu: [x64]
+ os: [win32]
+
+ '@jridgewell/gen-mapping@0.3.13':
+ resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==}
+
+ '@jridgewell/remapping@2.3.5':
+ resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==}
+
+ '@jridgewell/resolve-uri@3.1.2':
+ resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
+ engines: {node: '>=6.0.0'}
+
+ '@jridgewell/sourcemap-codec@1.5.5':
+ resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==}
+
+ '@jridgewell/trace-mapping@0.3.31':
+ resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==}
+
+ '@mdx-js/mdx@3.1.1':
+ resolution: {integrity: sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==}
+
+ '@next/env@16.1.1':
+ resolution: {integrity: sha512-3oxyM97Sr2PqiVyMyrZUtrtM3jqqFxOQJVuKclDsgj/L728iZt/GyslkN4NwarledZATCenbk4Offjk1hQmaAA==}
+
+ '@next/swc-darwin-arm64@16.1.1':
+ resolution: {integrity: sha512-JS3m42ifsVSJjSTzh27nW+Igfha3NdBOFScr9C80hHGrWx55pTrVL23RJbqir7k7/15SKlrLHhh/MQzqBBYrQA==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@next/swc-darwin-x64@16.1.1':
+ resolution: {integrity: sha512-hbyKtrDGUkgkyQi1m1IyD3q4I/3m9ngr+V93z4oKHrPcmxwNL5iMWORvLSGAf2YujL+6HxgVvZuCYZfLfb4bGw==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [darwin]
+
+ '@next/swc-linux-arm64-gnu@16.1.1':
+ resolution: {integrity: sha512-/fvHet+EYckFvRLQ0jPHJCUI5/B56+2DpI1xDSvi80r/3Ez+Eaa2Yq4tJcRTaB1kqj/HrYKn8Yplm9bNoMJpwQ==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [linux]
+
+ '@next/swc-linux-arm64-musl@16.1.1':
+ resolution: {integrity: sha512-MFHrgL4TXNQbBPzkKKur4Fb5ICEJa87HM7fczFs2+HWblM7mMLdco3dvyTI+QmLBU9xgns/EeeINSZD6Ar+oLg==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [linux]
+
+ '@next/swc-linux-x64-gnu@16.1.1':
+ resolution: {integrity: sha512-20bYDfgOQAPUkkKBnyP9PTuHiJGM7HzNBbuqmD0jiFVZ0aOldz+VnJhbxzjcSabYsnNjMPsE0cyzEudpYxsrUQ==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [linux]
+
+ '@next/swc-linux-x64-musl@16.1.1':
+ resolution: {integrity: sha512-9pRbK3M4asAHQRkwaXwu601oPZHghuSC8IXNENgbBSyImHv/zY4K5udBusgdHkvJ/Tcr96jJwQYOll0qU8+fPA==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [linux]
+
+ '@next/swc-win32-arm64-msvc@16.1.1':
+ resolution: {integrity: sha512-bdfQkggaLgnmYrFkSQfsHfOhk/mCYmjnrbRCGgkMcoOBZ4n+TRRSLmT/CU5SATzlBJ9TpioUyBW/vWFXTqQRiA==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [win32]
+
+ '@next/swc-win32-x64-msvc@16.1.1':
+ resolution: {integrity: sha512-Ncwbw2WJ57Al5OX0k4chM68DKhEPlrXBaSXDCi2kPi5f4d8b3ejr3RRJGfKBLrn2YJL5ezNS7w2TZLHSti8CMw==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [win32]
+
+ '@orama/orama@3.1.18':
+ resolution: {integrity: sha512-a61ljmRVVyG5MC/698C8/FfFDw5a8LOIvyOLW5fztgUXqUpc1jOfQzOitSCbge657OgXXThmY3Tk8fpiDb4UcA==}
+ engines: {node: '>= 20.0.0'}
+
+ '@radix-ui/number@1.1.1':
+ resolution: {integrity: sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==}
+
+ '@radix-ui/primitive@1.1.3':
+ resolution: {integrity: sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==}
+
+ '@radix-ui/react-accordion@1.2.12':
+ resolution: {integrity: sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-arrow@1.1.7':
+ resolution: {integrity: sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-collapsible@1.1.12':
+ resolution: {integrity: sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-collection@1.1.7':
+ resolution: {integrity: sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-compose-refs@1.1.2':
+ resolution: {integrity: sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-context@1.1.2':
+ resolution: {integrity: sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-dialog@1.1.15':
+ resolution: {integrity: sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-direction@1.1.1':
+ resolution: {integrity: sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-dismissable-layer@1.1.11':
+ resolution: {integrity: sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-focus-guards@1.1.3':
+ resolution: {integrity: sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-focus-scope@1.1.7':
+ resolution: {integrity: sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-id@1.1.1':
+ resolution: {integrity: sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-navigation-menu@1.2.14':
+ resolution: {integrity: sha512-YB9mTFQvCOAQMHU+C/jVl96WmuWeltyUEpRJJky51huhds5W2FQr1J8D/16sQlf0ozxkPK8uF3niQMdUwZPv5w==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-popover@1.1.15':
+ resolution: {integrity: sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-popper@1.2.8':
+ resolution: {integrity: sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-portal@1.1.9':
+ resolution: {integrity: sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-presence@1.1.5':
+ resolution: {integrity: sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-primitive@2.1.3':
+ resolution: {integrity: sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-roving-focus@1.1.11':
+ resolution: {integrity: sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-scroll-area@1.2.10':
+ resolution: {integrity: sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-slot@1.2.3':
+ resolution: {integrity: sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-slot@1.2.4':
+ resolution: {integrity: sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-tabs@1.1.13':
+ resolution: {integrity: sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/react-use-callback-ref@1.1.1':
+ resolution: {integrity: sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-controllable-state@1.2.2':
+ resolution: {integrity: sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-effect-event@0.0.2':
+ resolution: {integrity: sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-escape-keydown@1.1.1':
+ resolution: {integrity: sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-layout-effect@1.1.1':
+ resolution: {integrity: sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-previous@1.1.1':
+ resolution: {integrity: sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-rect@1.1.1':
+ resolution: {integrity: sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-use-size@1.1.1':
+ resolution: {integrity: sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ '@radix-ui/react-visually-hidden@1.2.3':
+ resolution: {integrity: sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
+ '@radix-ui/rect@1.1.1':
+ resolution: {integrity: sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==}
+
+ '@shikijs/core@3.21.0':
+ resolution: {integrity: sha512-AXSQu/2n1UIQekY8euBJlvFYZIw0PHY63jUzGbrOma4wPxzznJXTXkri+QcHeBNaFxiiOljKxxJkVSoB3PjbyA==}
+
+ '@shikijs/engine-javascript@3.21.0':
+ resolution: {integrity: sha512-ATwv86xlbmfD9n9gKRiwuPpWgPENAWCLwYCGz9ugTJlsO2kOzhOkvoyV/UD+tJ0uT7YRyD530x6ugNSffmvIiQ==}
+
+ '@shikijs/engine-oniguruma@3.21.0':
+ resolution: {integrity: sha512-OYknTCct6qiwpQDqDdf3iedRdzj6hFlOPv5hMvI+hkWfCKs5mlJ4TXziBG9nyabLwGulrUjHiCq3xCspSzErYQ==}
+
+ '@shikijs/langs@3.21.0':
+ resolution: {integrity: sha512-g6mn5m+Y6GBJ4wxmBYqalK9Sp0CFkUqfNzUy2pJglUginz6ZpWbaWjDB4fbQ/8SHzFjYbtU6Ddlp1pc+PPNDVA==}
+
+ '@shikijs/rehype@3.21.0':
+ resolution: {integrity: sha512-fTQvwsZL67QdosMFdTgQ5SNjW3nxaPplRy//312hqOctRbIwviTV0nAbhv3NfnztHXvFli2zLYNKsTz/f9tbpQ==}
+
+ '@shikijs/themes@3.21.0':
+ resolution: {integrity: sha512-BAE4cr9EDiZyYzwIHEk7JTBJ9CzlPuM4PchfcA5ao1dWXb25nv6hYsoDiBq2aZK9E3dlt3WB78uI96UESD+8Mw==}
+
+ '@shikijs/transformers@3.21.0':
+ resolution: {integrity: sha512-CZwvCWWIiRRiFk9/JKzdEooakAP8mQDtBOQ1TKiCaS2E1bYtyBCOkUzS8akO34/7ufICQ29oeSfkb3tT5KtrhA==}
+
+ '@shikijs/types@3.21.0':
+ resolution: {integrity: sha512-zGrWOxZ0/+0ovPY7PvBU2gIS9tmhSUUt30jAcNV0Bq0gb2S98gwfjIs1vxlmH5zM7/4YxLamT6ChlqqAJmPPjA==}
+
+ '@shikijs/vscode-textmate@10.0.2':
+ resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==}
+
+ '@standard-schema/spec@1.1.0':
+ resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==}
+
+ '@swc/helpers@0.5.15':
+ resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==}
+
+ '@tailwindcss/node@4.1.18':
+ resolution: {integrity: sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==}
+
+ '@tailwindcss/oxide-android-arm64@4.1.18':
+ resolution: {integrity: sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [android]
+
+ '@tailwindcss/oxide-darwin-arm64@4.1.18':
+ resolution: {integrity: sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@tailwindcss/oxide-darwin-x64@4.1.18':
+ resolution: {integrity: sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [darwin]
+
+ '@tailwindcss/oxide-freebsd-x64@4.1.18':
+ resolution: {integrity: sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [freebsd]
+
+ '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18':
+ resolution: {integrity: sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==}
+ engines: {node: '>= 10'}
+ cpu: [arm]
+ os: [linux]
+
+ '@tailwindcss/oxide-linux-arm64-gnu@4.1.18':
+ resolution: {integrity: sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [linux]
+
+ '@tailwindcss/oxide-linux-arm64-musl@4.1.18':
+ resolution: {integrity: sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [linux]
+
+ '@tailwindcss/oxide-linux-x64-gnu@4.1.18':
+ resolution: {integrity: sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [linux]
+
+ '@tailwindcss/oxide-linux-x64-musl@4.1.18':
+ resolution: {integrity: sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [linux]
+
+ '@tailwindcss/oxide-wasm32-wasi@4.1.18':
+ resolution: {integrity: sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==}
+ engines: {node: '>=14.0.0'}
+ cpu: [wasm32]
+ bundledDependencies:
+ - '@napi-rs/wasm-runtime'
+ - '@emnapi/core'
+ - '@emnapi/runtime'
+ - '@tybys/wasm-util'
+ - '@emnapi/wasi-threads'
+ - tslib
+
+ '@tailwindcss/oxide-win32-arm64-msvc@4.1.18':
+ resolution: {integrity: sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==}
+ engines: {node: '>= 10'}
+ cpu: [arm64]
+ os: [win32]
+
+ '@tailwindcss/oxide-win32-x64-msvc@4.1.18':
+ resolution: {integrity: sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==}
+ engines: {node: '>= 10'}
+ cpu: [x64]
+ os: [win32]
+
+ '@tailwindcss/oxide@4.1.18':
+ resolution: {integrity: sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==}
+ engines: {node: '>= 10'}
+
+ '@tailwindcss/postcss@4.1.18':
+ resolution: {integrity: sha512-Ce0GFnzAOuPyfV5SxjXGn0CubwGcuDB0zcdaPuCSzAa/2vII24JTkH+I6jcbXLb1ctjZMZZI6OjDaLPJQL1S0g==}
+
+ '@types/debug@4.1.12':
+ resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==}
+
+ '@types/estree-jsx@1.0.5':
+ resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==}
+
+ '@types/estree@1.0.8':
+ resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==}
+
+ '@types/hast@3.0.4':
+ resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==}
+
+ '@types/mdast@4.0.4':
+ resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==}
+
+ '@types/mdx@2.0.13':
+ resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==}
+
+ '@types/ms@2.1.0':
+ resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==}
+
+ '@types/node@22.19.6':
+ resolution: {integrity: sha512-qm+G8HuG6hOHQigsi7VGuLjUVu6TtBo/F05zvX04Mw2uCg9Dv0Qxy3Qw7j41SidlTcl5D/5yg0SEZqOB+EqZnQ==}
+
+ '@types/react-dom@19.2.3':
+ resolution: {integrity: sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==}
+ peerDependencies:
+ '@types/react': ^19.2.0
+
+ '@types/react@19.2.8':
+ resolution: {integrity: sha512-3MbSL37jEchWZz2p2mjntRZtPt837ij10ApxKfgmXCTuHWagYg7iA5bqPw6C8BMPfwidlvfPI/fxOc42HLhcyg==}
+
+ '@types/unist@2.0.11':
+ resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==}
+
+ '@types/unist@3.0.3':
+ resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==}
+
+ '@ungap/structured-clone@1.3.0':
+ resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==}
+
+ acorn-jsx@5.3.2:
+ resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
+ peerDependencies:
+ acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+
+ acorn@8.15.0:
+ resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==}
+ engines: {node: '>=0.4.0'}
+ hasBin: true
+
+ argparse@2.0.1:
+ resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
+
+ aria-hidden@1.2.6:
+ resolution: {integrity: sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==}
+ engines: {node: '>=10'}
+
+ astring@1.9.0:
+ resolution: {integrity: sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==}
+ hasBin: true
+
+ autoprefixer@10.4.23:
+ resolution: {integrity: sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==}
+ engines: {node: ^10 || ^12 || >=14}
+ hasBin: true
+ peerDependencies:
+ postcss: ^8.1.0
+
+ bail@2.0.2:
+ resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==}
+
+ baseline-browser-mapping@2.9.14:
+ resolution: {integrity: sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==}
+ hasBin: true
+
+ browserslist@4.28.1:
+ resolution: {integrity: sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==}
+ engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
+ hasBin: true
+
+ caniuse-lite@1.0.30001764:
+ resolution: {integrity: sha512-9JGuzl2M+vPL+pz70gtMF9sHdMFbY9FJaQBi186cHKH3pSzDvzoUJUPV6fqiKIMyXbud9ZLg4F3Yza1vJ1+93g==}
+
+ ccount@2.0.1:
+ resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==}
+
+ character-entities-html4@2.1.0:
+ resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==}
+
+ character-entities-legacy@3.0.0:
+ resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==}
+
+ character-entities@2.0.2:
+ resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==}
+
+ character-reference-invalid@2.0.1:
+ resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==}
+
+ chokidar@5.0.0:
+ resolution: {integrity: sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==}
+ engines: {node: '>= 20.19.0'}
+
+ class-variance-authority@0.7.1:
+ resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==}
+
+ client-only@0.0.1:
+ resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==}
+
+ clsx@2.1.1:
+ resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==}
+ engines: {node: '>=6'}
+
+ collapse-white-space@2.1.0:
+ resolution: {integrity: sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==}
+
+ comma-separated-tokens@2.0.3:
+ resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==}
+
+ compute-scroll-into-view@3.1.1:
+ resolution: {integrity: sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw==}
+
+ cssesc@3.0.0:
+ resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==}
+ engines: {node: '>=4'}
+ hasBin: true
+
+ csstype@3.2.3:
+ resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==}
+
+ debug@4.4.3:
+ resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==}
+ engines: {node: '>=6.0'}
+ peerDependencies:
+ supports-color: '*'
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+
+ decode-named-character-reference@1.2.0:
+ resolution: {integrity: sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==}
+
+ dequal@2.0.3:
+ resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==}
+ engines: {node: '>=6'}
+
+ detect-libc@2.1.2:
+ resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==}
+ engines: {node: '>=8'}
+
+ detect-node-es@1.1.0:
+ resolution: {integrity: sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==}
+
+ devlop@1.1.0:
+ resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==}
+
+ electron-to-chromium@1.5.267:
+ resolution: {integrity: sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==}
+
+ enhanced-resolve@5.18.4:
+ resolution: {integrity: sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==}
+ engines: {node: '>=10.13.0'}
+
+ esast-util-from-estree@2.0.0:
+ resolution: {integrity: sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==}
+
+ esast-util-from-js@2.0.1:
+ resolution: {integrity: sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==}
+
+ esbuild@0.27.2:
+ resolution: {integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==}
+ engines: {node: '>=18'}
+ hasBin: true
+
+ escalade@3.2.0:
+ resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
+ engines: {node: '>=6'}
+
+ escape-string-regexp@5.0.0:
+ resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==}
+ engines: {node: '>=12'}
+
+ estree-util-attach-comments@3.0.0:
+ resolution: {integrity: sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==}
+
+ estree-util-build-jsx@3.0.1:
+ resolution: {integrity: sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==}
+
+ estree-util-is-identifier-name@3.0.0:
+ resolution: {integrity: sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==}
+
+ estree-util-scope@1.0.0:
+ resolution: {integrity: sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==}
+
+ estree-util-to-js@2.0.0:
+ resolution: {integrity: sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==}
+
+ estree-util-value-to-estree@3.5.0:
+ resolution: {integrity: sha512-aMV56R27Gv3QmfmF1MY12GWkGzzeAezAX+UplqHVASfjc9wNzI/X6hC0S9oxq61WT4aQesLGslWP9tKk6ghRZQ==}
+
+ estree-util-visit@2.0.0:
+ resolution: {integrity: sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==}
+
+ estree-walker@3.0.3:
+ resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==}
+
+ extend@3.0.2:
+ resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==}
+
+ fdir@6.5.0:
+ resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==}
+ engines: {node: '>=12.0.0'}
+ peerDependencies:
+ picomatch: ^3 || ^4
+ peerDependenciesMeta:
+ picomatch:
+ optional: true
+
+ fraction.js@5.3.4:
+ resolution: {integrity: sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==}
+
+ fumadocs-core@16.4.7:
+ resolution: {integrity: sha512-oEsoha5EjyQnhRb6s5tNYEM+AiDA4BN80RyevRohsKPXGRQ2K3ddMaFAQq5kBaqA/Xxb+vqrElyRtzmdif7w2A==}
+ peerDependencies:
+ '@mixedbread/sdk': ^0.46.0
+ '@orama/core': 1.x.x
+ '@oramacloud/client': 2.x.x
+ '@tanstack/react-router': 1.x.x
+ '@types/react': '*'
+ algoliasearch: 5.x.x
+ lucide-react: '*'
+ next: 16.x.x
+ react: ^19.2.0
+ react-dom: ^19.2.0
+ react-router: 7.x.x
+ waku: ^0.26.0 || ^0.27.0
+ zod: 4.x.x
+ peerDependenciesMeta:
+ '@mixedbread/sdk':
+ optional: true
+ '@orama/core':
+ optional: true
+ '@oramacloud/client':
+ optional: true
+ '@tanstack/react-router':
+ optional: true
+ '@types/react':
+ optional: true
+ algoliasearch:
+ optional: true
+ lucide-react:
+ optional: true
+ next:
+ optional: true
+ react:
+ optional: true
+ react-dom:
+ optional: true
+ react-router:
+ optional: true
+ waku:
+ optional: true
+ zod:
+ optional: true
+
+ fumadocs-mdx@14.2.5:
+ resolution: {integrity: sha512-1WJeJ1Xago2lRq6GhTvTb+hxDtWUBr7lHi4YgHNBYSpWKsTfOor3UxgZV1UYBrd32cq4xHdtMK33LM67gA0eBA==}
+ hasBin: true
+ peerDependencies:
+ '@fumadocs/mdx-remote': ^1.4.0
+ '@types/react': '*'
+ fumadocs-core: ^15.0.0 || ^16.0.0
+ next: ^15.3.0 || ^16.0.0
+ react: '*'
+ vite: 6.x.x || 7.x.x
+ peerDependenciesMeta:
+ '@fumadocs/mdx-remote':
+ optional: true
+ '@types/react':
+ optional: true
+ next:
+ optional: true
+ react:
+ optional: true
+ vite:
+ optional: true
+
+ fumadocs-ui@16.4.7:
+ resolution: {integrity: sha512-ShEftF54mj89EW7Wll2wwGcH6bNTmPrPtUUmO+ThakK13skJmY7GSBH3Ft51TzQNLhN3kBKEQipIlJWc7LT5NQ==}
+ peerDependencies:
+ '@types/react': '*'
+ fumadocs-core: 16.4.7
+ next: 16.x.x
+ react: ^19.2.0
+ react-dom: ^19.2.0
+ tailwindcss: ^4.0.0
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ next:
+ optional: true
+ tailwindcss:
+ optional: true
+
+ get-nonce@1.0.1:
+ resolution: {integrity: sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==}
+ engines: {node: '>=6'}
+
+ github-slugger@2.0.0:
+ resolution: {integrity: sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==}
+
+ graceful-fs@4.2.11:
+ resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}
+
+ hast-util-to-estree@3.1.3:
+ resolution: {integrity: sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==}
+
+ hast-util-to-html@9.0.5:
+ resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==}
+
+ hast-util-to-jsx-runtime@2.3.6:
+ resolution: {integrity: sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==}
+
+ hast-util-to-string@3.0.1:
+ resolution: {integrity: sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==}
+
+ hast-util-whitespace@3.0.0:
+ resolution: {integrity: sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==}
+
+ html-void-elements@3.0.0:
+ resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==}
+
+ image-size@2.0.2:
+ resolution: {integrity: sha512-IRqXKlaXwgSMAMtpNzZa1ZAe8m+Sa1770Dhk8VkSsP9LS+iHD62Zd8FQKs8fbPiagBE7BzoFX23cxFnwshpV6w==}
+ engines: {node: '>=16.x'}
+ hasBin: true
+
+ inline-style-parser@0.2.7:
+ resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==}
+
+ is-alphabetical@2.0.1:
+ resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==}
+
+ is-alphanumerical@2.0.1:
+ resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==}
+
+ is-decimal@2.0.1:
+ resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==}
+
+ is-hexadecimal@2.0.1:
+ resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==}
+
+ is-plain-obj@4.1.0:
+ resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==}
+ engines: {node: '>=12'}
+
+ jiti@2.6.1:
+ resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==}
+ hasBin: true
+
+ js-yaml@4.1.1:
+ resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==}
+ hasBin: true
+
+ lightningcss-android-arm64@1.30.2:
+ resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [arm64]
+ os: [android]
+
+ lightningcss-darwin-arm64@1.30.2:
+ resolution: {integrity: sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [arm64]
+ os: [darwin]
+
+ lightningcss-darwin-x64@1.30.2:
+ resolution: {integrity: sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [x64]
+ os: [darwin]
+
+ lightningcss-freebsd-x64@1.30.2:
+ resolution: {integrity: sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [x64]
+ os: [freebsd]
+
+ lightningcss-linux-arm-gnueabihf@1.30.2:
+ resolution: {integrity: sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [arm]
+ os: [linux]
+
+ lightningcss-linux-arm64-gnu@1.30.2:
+ resolution: {integrity: sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [arm64]
+ os: [linux]
+
+ lightningcss-linux-arm64-musl@1.30.2:
+ resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [arm64]
+ os: [linux]
+
+ lightningcss-linux-x64-gnu@1.30.2:
+ resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [x64]
+ os: [linux]
+
+ lightningcss-linux-x64-musl@1.30.2:
+ resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [x64]
+ os: [linux]
+
+ lightningcss-win32-arm64-msvc@1.30.2:
+ resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [arm64]
+ os: [win32]
+
+ lightningcss-win32-x64-msvc@1.30.2:
+ resolution: {integrity: sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==}
+ engines: {node: '>= 12.0.0'}
+ cpu: [x64]
+ os: [win32]
+
+ lightningcss@1.30.2:
+ resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==}
+ engines: {node: '>= 12.0.0'}
+
+ longest-streak@3.1.0:
+ resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==}
+
+ lucide-react@0.562.0:
+ resolution: {integrity: sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==}
+ peerDependencies:
+ react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0
+
+ magic-string@0.30.21:
+ resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==}
+
+ markdown-extensions@2.0.0:
+ resolution: {integrity: sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==}
+ engines: {node: '>=16'}
+
+ markdown-table@3.0.4:
+ resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==}
+
+ mdast-util-find-and-replace@3.0.2:
+ resolution: {integrity: sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==}
+
+ mdast-util-from-markdown@2.0.2:
+ resolution: {integrity: sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==}
+
+ mdast-util-gfm-autolink-literal@2.0.1:
+ resolution: {integrity: sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==}
+
+ mdast-util-gfm-footnote@2.1.0:
+ resolution: {integrity: sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==}
+
+ mdast-util-gfm-strikethrough@2.0.0:
+ resolution: {integrity: sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==}
+
+ mdast-util-gfm-table@2.0.0:
+ resolution: {integrity: sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==}
+
+ mdast-util-gfm-task-list-item@2.0.0:
+ resolution: {integrity: sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==}
+
+ mdast-util-gfm@3.1.0:
+ resolution: {integrity: sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==}
+
+ mdast-util-mdx-expression@2.0.1:
+ resolution: {integrity: sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==}
+
+ mdast-util-mdx-jsx@3.2.0:
+ resolution: {integrity: sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==}
+
+ mdast-util-mdx@3.0.0:
+ resolution: {integrity: sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==}
+
+ mdast-util-mdxjs-esm@2.0.1:
+ resolution: {integrity: sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==}
+
+ mdast-util-phrasing@4.1.0:
+ resolution: {integrity: sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==}
+
+ mdast-util-to-hast@13.2.1:
+ resolution: {integrity: sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==}
+
+ mdast-util-to-markdown@2.1.2:
+ resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==}
+
+ mdast-util-to-string@4.0.0:
+ resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==}
+
+ micromark-core-commonmark@2.0.3:
+ resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==}
+
+ micromark-extension-gfm-autolink-literal@2.1.0:
+ resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==}
+
+ micromark-extension-gfm-footnote@2.1.0:
+ resolution: {integrity: sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==}
+
+ micromark-extension-gfm-strikethrough@2.1.0:
+ resolution: {integrity: sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==}
+
+ micromark-extension-gfm-table@2.1.1:
+ resolution: {integrity: sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==}
+
+ micromark-extension-gfm-tagfilter@2.0.0:
+ resolution: {integrity: sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==}
+
+ micromark-extension-gfm-task-list-item@2.1.0:
+ resolution: {integrity: sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==}
+
+ micromark-extension-gfm@3.0.0:
+ resolution: {integrity: sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==}
+
+ micromark-extension-mdx-expression@3.0.1:
+ resolution: {integrity: sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==}
+
+ micromark-extension-mdx-jsx@3.0.2:
+ resolution: {integrity: sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==}
+
+ micromark-extension-mdx-md@2.0.0:
+ resolution: {integrity: sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==}
+
+ micromark-extension-mdxjs-esm@3.0.0:
+ resolution: {integrity: sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==}
+
+ micromark-extension-mdxjs@3.0.0:
+ resolution: {integrity: sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==}
+
+ micromark-factory-destination@2.0.1:
+ resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==}
+
+ micromark-factory-label@2.0.1:
+ resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==}
+
+ micromark-factory-mdx-expression@2.0.3:
+ resolution: {integrity: sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==}
+
+ micromark-factory-space@2.0.1:
+ resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==}
+
+ micromark-factory-title@2.0.1:
+ resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==}
+
+ micromark-factory-whitespace@2.0.1:
+ resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==}
+
+ micromark-util-character@2.1.1:
+ resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==}
+
+ micromark-util-chunked@2.0.1:
+ resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==}
+
+ micromark-util-classify-character@2.0.1:
+ resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==}
+
+ micromark-util-combine-extensions@2.0.1:
+ resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==}
+
+ micromark-util-decode-numeric-character-reference@2.0.2:
+ resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==}
+
+ micromark-util-decode-string@2.0.1:
+ resolution: {integrity: sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==}
+
+ micromark-util-encode@2.0.1:
+ resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==}
+
+ micromark-util-events-to-acorn@2.0.3:
+ resolution: {integrity: sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==}
+
+ micromark-util-html-tag-name@2.0.1:
+ resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==}
+
+ micromark-util-normalize-identifier@2.0.1:
+ resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==}
+
+ micromark-util-resolve-all@2.0.1:
+ resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==}
+
+ micromark-util-sanitize-uri@2.0.1:
+ resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==}
+
+ micromark-util-subtokenize@2.1.0:
+ resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==}
+
+ micromark-util-symbol@2.0.1:
+ resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==}
+
+ micromark-util-types@2.0.2:
+ resolution: {integrity: sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==}
+
+ micromark@4.0.2:
+ resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==}
+
+ ms@2.1.3:
+ resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
+
+ nanoid@3.3.11:
+ resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==}
+ engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
+ hasBin: true
+
+ negotiator@1.0.0:
+ resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==}
+ engines: {node: '>= 0.6'}
+
+ next-themes@0.4.6:
+ resolution: {integrity: sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==}
+ peerDependencies:
+ react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
+
+ next@16.1.1:
+ resolution: {integrity: sha512-QI+T7xrxt1pF6SQ/JYFz95ro/mg/1Znk5vBebsWwbpejj1T0A23hO7GYEaVac9QUOT2BIMiuzm0L99ooq7k0/w==}
+ engines: {node: '>=20.9.0'}
+ hasBin: true
+ peerDependencies:
+ '@opentelemetry/api': ^1.1.0
+ '@playwright/test': ^1.51.1
+ babel-plugin-react-compiler: '*'
+ react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0
+ react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0
+ sass: ^1.3.0
+ peerDependenciesMeta:
+ '@opentelemetry/api':
+ optional: true
+ '@playwright/test':
+ optional: true
+ babel-plugin-react-compiler:
+ optional: true
+ sass:
+ optional: true
+
+ node-releases@2.0.27:
+ resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==}
+
+ npm-to-yarn@3.0.1:
+ resolution: {integrity: sha512-tt6PvKu4WyzPwWUzy/hvPFqn+uwXO0K1ZHka8az3NnrhWJDmSqI8ncWq0fkL0k/lmmi5tAC11FXwXuh0rFbt1A==}
+ engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+
+ oniguruma-parser@0.12.1:
+ resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==}
+
+ oniguruma-to-es@4.3.4:
+ resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==}
+
+ parse-entities@4.0.2:
+ resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==}
+
+ path-to-regexp@8.3.0:
+ resolution: {integrity: sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==}
+
+ picocolors@1.1.1:
+ resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}
+
+ picomatch@4.0.3:
+ resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==}
+ engines: {node: '>=12'}
+
+ postcss-selector-parser@7.1.1:
+ resolution: {integrity: sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==}
+ engines: {node: '>=4'}
+
+ postcss-value-parser@4.2.0:
+ resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==}
+
+ postcss@8.4.31:
+ resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==}
+ engines: {node: ^10 || ^12 || >=14}
+
+ postcss@8.5.6:
+ resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==}
+ engines: {node: ^10 || ^12 || >=14}
+
+ property-information@7.1.0:
+ resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==}
+
+ react-dom@19.2.3:
+ resolution: {integrity: sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==}
+ peerDependencies:
+ react: ^19.2.3
+
+ react-medium-image-zoom@5.4.0:
+ resolution: {integrity: sha512-BsE+EnFVQzFIlyuuQrZ9iTwyKpKkqdFZV1ImEQN573QPqGrIUuNni7aF+sZwDcxlsuOMayCr6oO/PZR/yJnbRg==}
+ peerDependencies:
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
+ react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
+
+ react-remove-scroll-bar@2.3.8:
+ resolution: {integrity: sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ react-remove-scroll@2.7.2:
+ resolution: {integrity: sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ react-style-singleton@2.2.3:
+ resolution: {integrity: sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ react@19.2.3:
+ resolution: {integrity: sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==}
+ engines: {node: '>=0.10.0'}
+
+ readdirp@5.0.0:
+ resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==}
+ engines: {node: '>= 20.19.0'}
+
+ recma-build-jsx@1.0.0:
+ resolution: {integrity: sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==}
+
+ recma-jsx@1.0.1:
+ resolution: {integrity: sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==}
+ peerDependencies:
+ acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+
+ recma-parse@1.0.0:
+ resolution: {integrity: sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==}
+
+ recma-stringify@1.0.0:
+ resolution: {integrity: sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==}
+
+ regex-recursion@6.0.2:
+ resolution: {integrity: sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==}
+
+ regex-utilities@2.3.0:
+ resolution: {integrity: sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==}
+
+ regex@6.1.0:
+ resolution: {integrity: sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==}
+
+ rehype-recma@1.0.0:
+ resolution: {integrity: sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==}
+
+ remark-gfm@4.0.1:
+ resolution: {integrity: sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==}
+
+ remark-mdx@3.1.1:
+ resolution: {integrity: sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==}
+
+ remark-parse@11.0.0:
+ resolution: {integrity: sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==}
+
+ remark-rehype@11.1.2:
+ resolution: {integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==}
+
+ remark-stringify@11.0.0:
+ resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==}
+
+ remark@15.0.1:
+ resolution: {integrity: sha512-Eht5w30ruCXgFmxVUSlNWQ9iiimq07URKeFS3hNc8cUWy1llX4KDWfyEDZRycMc+znsN9Ux5/tJ/BFdgdOwA3A==}
+
+ scheduler@0.27.0:
+ resolution: {integrity: sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==}
+
+ scroll-into-view-if-needed@3.1.0:
+ resolution: {integrity: sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==}
+
+ semver@7.7.3:
+ resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==}
+ engines: {node: '>=10'}
+ hasBin: true
+
+ sharp@0.34.5:
+ resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==}
+ engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
+
+ shiki@3.21.0:
+ resolution: {integrity: sha512-N65B/3bqL/TI2crrXr+4UivctrAGEjmsib5rPMMPpFp1xAx/w03v8WZ9RDDFYteXoEgY7qZ4HGgl5KBIu1153w==}
+
+ source-map-js@1.2.1:
+ resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
+ engines: {node: '>=0.10.0'}
+
+ source-map@0.7.6:
+ resolution: {integrity: sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==}
+ engines: {node: '>= 12'}
+
+ space-separated-tokens@2.0.2:
+ resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==}
+
+ stringify-entities@4.0.4:
+ resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==}
+
+ style-to-js@1.1.21:
+ resolution: {integrity: sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==}
+
+ style-to-object@1.0.14:
+ resolution: {integrity: sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==}
+
+ styled-jsx@5.1.6:
+ resolution: {integrity: sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==}
+ engines: {node: '>= 12.0.0'}
+ peerDependencies:
+ '@babel/core': '*'
+ babel-plugin-macros: '*'
+ react: '>= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0'
+ peerDependenciesMeta:
+ '@babel/core':
+ optional: true
+ babel-plugin-macros:
+ optional: true
+
+ tailwind-merge@3.4.0:
+ resolution: {integrity: sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==}
+
+ tailwindcss@4.1.18:
+ resolution: {integrity: sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==}
+
+ tapable@2.3.0:
+ resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==}
+ engines: {node: '>=6'}
+
+ tinyexec@1.0.2:
+ resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==}
+ engines: {node: '>=18'}
+
+ tinyglobby@0.2.15:
+ resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==}
+ engines: {node: '>=12.0.0'}
+
+ trim-lines@3.0.1:
+ resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==}
+
+ trough@2.2.0:
+ resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==}
+
+ tslib@2.8.1:
+ resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}
+
+ typescript@5.9.3:
+ resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
+ engines: {node: '>=14.17'}
+ hasBin: true
+
+ undici-types@6.21.0:
+ resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
+
+ unified@11.0.5:
+ resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==}
+
+ unist-util-is@6.0.1:
+ resolution: {integrity: sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==}
+
+ unist-util-position-from-estree@2.0.0:
+ resolution: {integrity: sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==}
+
+ unist-util-position@5.0.0:
+ resolution: {integrity: sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==}
+
+ unist-util-remove-position@5.0.0:
+ resolution: {integrity: sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==}
+
+ unist-util-stringify-position@4.0.0:
+ resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==}
+
+ unist-util-visit-parents@6.0.2:
+ resolution: {integrity: sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==}
+
+ unist-util-visit@5.0.0:
+ resolution: {integrity: sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==}
+
+ update-browserslist-db@1.2.3:
+ resolution: {integrity: sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==}
+ hasBin: true
+ peerDependencies:
+ browserslist: '>= 4.21.0'
+
+ use-callback-ref@1.3.3:
+ resolution: {integrity: sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ use-sidecar@1.1.3:
+ resolution: {integrity: sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==}
+ engines: {node: '>=10'}
+ peerDependencies:
+ '@types/react': '*'
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+
+ util-deprecate@1.0.2:
+ resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
+
+ vfile-message@4.0.3:
+ resolution: {integrity: sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==}
+
+ vfile@6.0.3:
+ resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==}
+
+ zod@4.3.5:
+ resolution: {integrity: sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==}
+
+ zwitch@2.0.4:
+ resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==}
+
+snapshots:
+
+ '@alloc/quick-lru@5.2.0': {}
+
+ '@emnapi/runtime@1.8.1':
+ dependencies:
+ tslib: 2.8.1
+ optional: true
+
+ '@esbuild/aix-ppc64@0.27.2':
+ optional: true
+
+ '@esbuild/android-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/android-arm@0.27.2':
+ optional: true
+
+ '@esbuild/android-x64@0.27.2':
+ optional: true
+
+ '@esbuild/darwin-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/darwin-x64@0.27.2':
+ optional: true
+
+ '@esbuild/freebsd-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/freebsd-x64@0.27.2':
+ optional: true
+
+ '@esbuild/linux-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/linux-arm@0.27.2':
+ optional: true
+
+ '@esbuild/linux-ia32@0.27.2':
+ optional: true
+
+ '@esbuild/linux-loong64@0.27.2':
+ optional: true
+
+ '@esbuild/linux-mips64el@0.27.2':
+ optional: true
+
+ '@esbuild/linux-ppc64@0.27.2':
+ optional: true
+
+ '@esbuild/linux-riscv64@0.27.2':
+ optional: true
+
+ '@esbuild/linux-s390x@0.27.2':
+ optional: true
+
+ '@esbuild/linux-x64@0.27.2':
+ optional: true
+
+ '@esbuild/netbsd-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/netbsd-x64@0.27.2':
+ optional: true
+
+ '@esbuild/openbsd-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/openbsd-x64@0.27.2':
+ optional: true
+
+ '@esbuild/openharmony-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/sunos-x64@0.27.2':
+ optional: true
+
+ '@esbuild/win32-arm64@0.27.2':
+ optional: true
+
+ '@esbuild/win32-ia32@0.27.2':
+ optional: true
+
+ '@esbuild/win32-x64@0.27.2':
+ optional: true
+
+ '@floating-ui/core@1.7.3':
+ dependencies:
+ '@floating-ui/utils': 0.2.10
+
+ '@floating-ui/dom@1.7.4':
+ dependencies:
+ '@floating-ui/core': 1.7.3
+ '@floating-ui/utils': 0.2.10
+
+ '@floating-ui/react-dom@2.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@floating-ui/dom': 1.7.4
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+
+ '@floating-ui/utils@0.2.10': {}
+
+ '@formatjs/fast-memoize@3.0.3':
+ dependencies:
+ tslib: 2.8.1
+
+ '@formatjs/intl-localematcher@0.7.5':
+ dependencies:
+ '@formatjs/fast-memoize': 3.0.3
+ tslib: 2.8.1
+
+ '@fumadocs/ui@16.4.7(@types/react@19.2.8)(fumadocs-core@16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(tailwindcss@4.1.18)':
+ dependencies:
+ fumadocs-core: 16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5)
+ next-themes: 0.4.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ postcss-selector-parser: 7.1.1
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ tailwind-merge: 3.4.0
+ optionalDependencies:
+ '@types/react': 19.2.8
+ next: 16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ tailwindcss: 4.1.18
+
+ '@img/colour@1.0.0':
+ optional: true
+
+ '@img/sharp-darwin-arm64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-darwin-arm64': 1.2.4
+ optional: true
+
+ '@img/sharp-darwin-x64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-darwin-x64': 1.2.4
+ optional: true
+
+ '@img/sharp-libvips-darwin-arm64@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-darwin-x64@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linux-arm64@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linux-arm@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linux-ppc64@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linux-riscv64@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linux-s390x@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linux-x64@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linuxmusl-arm64@1.2.4':
+ optional: true
+
+ '@img/sharp-libvips-linuxmusl-x64@1.2.4':
+ optional: true
+
+ '@img/sharp-linux-arm64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linux-arm64': 1.2.4
+ optional: true
+
+ '@img/sharp-linux-arm@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linux-arm': 1.2.4
+ optional: true
+
+ '@img/sharp-linux-ppc64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linux-ppc64': 1.2.4
+ optional: true
+
+ '@img/sharp-linux-riscv64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linux-riscv64': 1.2.4
+ optional: true
+
+ '@img/sharp-linux-s390x@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linux-s390x': 1.2.4
+ optional: true
+
+ '@img/sharp-linux-x64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linux-x64': 1.2.4
+ optional: true
+
+ '@img/sharp-linuxmusl-arm64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linuxmusl-arm64': 1.2.4
+ optional: true
+
+ '@img/sharp-linuxmusl-x64@0.34.5':
+ optionalDependencies:
+ '@img/sharp-libvips-linuxmusl-x64': 1.2.4
+ optional: true
+
+ '@img/sharp-wasm32@0.34.5':
+ dependencies:
+ '@emnapi/runtime': 1.8.1
+ optional: true
+
+ '@img/sharp-win32-arm64@0.34.5':
+ optional: true
+
+ '@img/sharp-win32-ia32@0.34.5':
+ optional: true
+
+ '@img/sharp-win32-x64@0.34.5':
+ optional: true
+
+ '@jridgewell/gen-mapping@0.3.13':
+ dependencies:
+ '@jridgewell/sourcemap-codec': 1.5.5
+ '@jridgewell/trace-mapping': 0.3.31
+
+ '@jridgewell/remapping@2.3.5':
+ dependencies:
+ '@jridgewell/gen-mapping': 0.3.13
+ '@jridgewell/trace-mapping': 0.3.31
+
+ '@jridgewell/resolve-uri@3.1.2': {}
+
+ '@jridgewell/sourcemap-codec@1.5.5': {}
+
+ '@jridgewell/trace-mapping@0.3.31':
+ dependencies:
+ '@jridgewell/resolve-uri': 3.1.2
+ '@jridgewell/sourcemap-codec': 1.5.5
+
+ '@mdx-js/mdx@3.1.1':
+ dependencies:
+ '@types/estree': 1.0.8
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ '@types/mdx': 2.0.13
+ acorn: 8.15.0
+ collapse-white-space: 2.1.0
+ devlop: 1.1.0
+ estree-util-is-identifier-name: 3.0.0
+ estree-util-scope: 1.0.0
+ estree-walker: 3.0.3
+ hast-util-to-jsx-runtime: 2.3.6
+ markdown-extensions: 2.0.0
+ recma-build-jsx: 1.0.0
+ recma-jsx: 1.0.1(acorn@8.15.0)
+ recma-stringify: 1.0.0
+ rehype-recma: 1.0.0
+ remark-mdx: 3.1.1
+ remark-parse: 11.0.0
+ remark-rehype: 11.1.2
+ source-map: 0.7.6
+ unified: 11.0.5
+ unist-util-position-from-estree: 2.0.0
+ unist-util-stringify-position: 4.0.0
+ unist-util-visit: 5.0.0
+ vfile: 6.0.3
+ transitivePeerDependencies:
+ - supports-color
+
+ '@next/env@16.1.1': {}
+
+ '@next/swc-darwin-arm64@16.1.1':
+ optional: true
+
+ '@next/swc-darwin-x64@16.1.1':
+ optional: true
+
+ '@next/swc-linux-arm64-gnu@16.1.1':
+ optional: true
+
+ '@next/swc-linux-arm64-musl@16.1.1':
+ optional: true
+
+ '@next/swc-linux-x64-gnu@16.1.1':
+ optional: true
+
+ '@next/swc-linux-x64-musl@16.1.1':
+ optional: true
+
+ '@next/swc-win32-arm64-msvc@16.1.1':
+ optional: true
+
+ '@next/swc-win32-x64-msvc@16.1.1':
+ optional: true
+
+ '@orama/orama@3.1.18': {}
+
+ '@radix-ui/number@1.1.1': {}
+
+ '@radix-ui/primitive@1.1.3': {}
+
+ '@radix-ui/react-accordion@1.2.12(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collapsible': 1.1.12(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-direction': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-id': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-arrow@1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-collapsible@1.1.12(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-id': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-collection@1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-slot': 1.2.3(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-compose-refs@1.1.2(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-context@1.1.2(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-dialog@1.1.15(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-focus-guards': 1.1.3(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-id': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-slot': 1.2.3(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.8)(react@19.2.3)
+ aria-hidden: 1.2.6
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ react-remove-scroll: 2.7.2(@types/react@19.2.8)(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-direction@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-dismissable-layer@1.1.11(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-escape-keydown': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-focus-guards@1.1.3(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-focus-scope@1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-id@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-navigation-menu@1.2.14(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-direction': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-id': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-previous': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-visually-hidden': 1.2.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-popover@1.1.15(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-dismissable-layer': 1.1.11(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-focus-guards': 1.1.3(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-focus-scope': 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-id': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-popper': 1.2.8(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-portal': 1.1.9(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-slot': 1.2.3(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.8)(react@19.2.3)
+ aria-hidden: 1.2.6
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ react-remove-scroll: 2.7.2(@types/react@19.2.8)(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-popper@1.2.8(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@floating-ui/react-dom': 2.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-arrow': 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-rect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-size': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/rect': 1.1.1
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-portal@1.1.9(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-presence@1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-primitive@2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-slot': 1.2.3(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-roving-focus@1.1.11(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-direction': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-id': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-scroll-area@1.2.10(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/number': 1.1.1
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-direction': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-slot@1.2.3(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-slot@1.2.4(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-tabs@1.1.13(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.3
+ '@radix-ui/react-context': 1.1.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-direction': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-id': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-roving-focus': 1.1.11(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/react-use-callback-ref@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-use-controllable-state@1.2.2(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-use-effect-event': 0.0.2(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-use-effect-event@0.0.2(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-use-escape-keydown@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-use-layout-effect@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-use-previous@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-use-rect@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/rect': 1.1.1
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-use-size@1.1.1(@types/react@19.2.8)(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ react: 19.2.3
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ '@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)':
+ dependencies:
+ '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+ '@types/react-dom': 19.2.3(@types/react@19.2.8)
+
+ '@radix-ui/rect@1.1.1': {}
+
+ '@shikijs/core@3.21.0':
+ dependencies:
+ '@shikijs/types': 3.21.0
+ '@shikijs/vscode-textmate': 10.0.2
+ '@types/hast': 3.0.4
+ hast-util-to-html: 9.0.5
+
+ '@shikijs/engine-javascript@3.21.0':
+ dependencies:
+ '@shikijs/types': 3.21.0
+ '@shikijs/vscode-textmate': 10.0.2
+ oniguruma-to-es: 4.3.4
+
+ '@shikijs/engine-oniguruma@3.21.0':
+ dependencies:
+ '@shikijs/types': 3.21.0
+ '@shikijs/vscode-textmate': 10.0.2
+
+ '@shikijs/langs@3.21.0':
+ dependencies:
+ '@shikijs/types': 3.21.0
+
+ '@shikijs/rehype@3.21.0':
+ dependencies:
+ '@shikijs/types': 3.21.0
+ '@types/hast': 3.0.4
+ hast-util-to-string: 3.0.1
+ shiki: 3.21.0
+ unified: 11.0.5
+ unist-util-visit: 5.0.0
+
+ '@shikijs/themes@3.21.0':
+ dependencies:
+ '@shikijs/types': 3.21.0
+
+ '@shikijs/transformers@3.21.0':
+ dependencies:
+ '@shikijs/core': 3.21.0
+ '@shikijs/types': 3.21.0
+
+ '@shikijs/types@3.21.0':
+ dependencies:
+ '@shikijs/vscode-textmate': 10.0.2
+ '@types/hast': 3.0.4
+
+ '@shikijs/vscode-textmate@10.0.2': {}
+
+ '@standard-schema/spec@1.1.0': {}
+
+ '@swc/helpers@0.5.15':
+ dependencies:
+ tslib: 2.8.1
+
+ '@tailwindcss/node@4.1.18':
+ dependencies:
+ '@jridgewell/remapping': 2.3.5
+ enhanced-resolve: 5.18.4
+ jiti: 2.6.1
+ lightningcss: 1.30.2
+ magic-string: 0.30.21
+ source-map-js: 1.2.1
+ tailwindcss: 4.1.18
+
+ '@tailwindcss/oxide-android-arm64@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-darwin-arm64@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-darwin-x64@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-freebsd-x64@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-linux-arm64-gnu@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-linux-arm64-musl@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-linux-x64-gnu@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-linux-x64-musl@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-wasm32-wasi@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-win32-arm64-msvc@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide-win32-x64-msvc@4.1.18':
+ optional: true
+
+ '@tailwindcss/oxide@4.1.18':
+ optionalDependencies:
+ '@tailwindcss/oxide-android-arm64': 4.1.18
+ '@tailwindcss/oxide-darwin-arm64': 4.1.18
+ '@tailwindcss/oxide-darwin-x64': 4.1.18
+ '@tailwindcss/oxide-freebsd-x64': 4.1.18
+ '@tailwindcss/oxide-linux-arm-gnueabihf': 4.1.18
+ '@tailwindcss/oxide-linux-arm64-gnu': 4.1.18
+ '@tailwindcss/oxide-linux-arm64-musl': 4.1.18
+ '@tailwindcss/oxide-linux-x64-gnu': 4.1.18
+ '@tailwindcss/oxide-linux-x64-musl': 4.1.18
+ '@tailwindcss/oxide-wasm32-wasi': 4.1.18
+ '@tailwindcss/oxide-win32-arm64-msvc': 4.1.18
+ '@tailwindcss/oxide-win32-x64-msvc': 4.1.18
+
+ '@tailwindcss/postcss@4.1.18':
+ dependencies:
+ '@alloc/quick-lru': 5.2.0
+ '@tailwindcss/node': 4.1.18
+ '@tailwindcss/oxide': 4.1.18
+ postcss: 8.5.6
+ tailwindcss: 4.1.18
+
+ '@types/debug@4.1.12':
+ dependencies:
+ '@types/ms': 2.1.0
+
+ '@types/estree-jsx@1.0.5':
+ dependencies:
+ '@types/estree': 1.0.8
+
+ '@types/estree@1.0.8': {}
+
+ '@types/hast@3.0.4':
+ dependencies:
+ '@types/unist': 3.0.3
+
+ '@types/mdast@4.0.4':
+ dependencies:
+ '@types/unist': 3.0.3
+
+ '@types/mdx@2.0.13': {}
+
+ '@types/ms@2.1.0': {}
+
+ '@types/node@22.19.6':
+ dependencies:
+ undici-types: 6.21.0
+
+ '@types/react-dom@19.2.3(@types/react@19.2.8)':
+ dependencies:
+ '@types/react': 19.2.8
+
+ '@types/react@19.2.8':
+ dependencies:
+ csstype: 3.2.3
+
+ '@types/unist@2.0.11': {}
+
+ '@types/unist@3.0.3': {}
+
+ '@ungap/structured-clone@1.3.0': {}
+
+ acorn-jsx@5.3.2(acorn@8.15.0):
+ dependencies:
+ acorn: 8.15.0
+
+ acorn@8.15.0: {}
+
+ argparse@2.0.1: {}
+
+ aria-hidden@1.2.6:
+ dependencies:
+ tslib: 2.8.1
+
+ astring@1.9.0: {}
+
+ autoprefixer@10.4.23(postcss@8.5.6):
+ dependencies:
+ browserslist: 4.28.1
+ caniuse-lite: 1.0.30001764
+ fraction.js: 5.3.4
+ picocolors: 1.1.1
+ postcss: 8.5.6
+ postcss-value-parser: 4.2.0
+
+ bail@2.0.2: {}
+
+ baseline-browser-mapping@2.9.14: {}
+
+ browserslist@4.28.1:
+ dependencies:
+ baseline-browser-mapping: 2.9.14
+ caniuse-lite: 1.0.30001764
+ electron-to-chromium: 1.5.267
+ node-releases: 2.0.27
+ update-browserslist-db: 1.2.3(browserslist@4.28.1)
+
+ caniuse-lite@1.0.30001764: {}
+
+ ccount@2.0.1: {}
+
+ character-entities-html4@2.1.0: {}
+
+ character-entities-legacy@3.0.0: {}
+
+ character-entities@2.0.2: {}
+
+ character-reference-invalid@2.0.1: {}
+
+ chokidar@5.0.0:
+ dependencies:
+ readdirp: 5.0.0
+
+ class-variance-authority@0.7.1:
+ dependencies:
+ clsx: 2.1.1
+
+ client-only@0.0.1: {}
+
+ clsx@2.1.1: {}
+
+ collapse-white-space@2.1.0: {}
+
+ comma-separated-tokens@2.0.3: {}
+
+ compute-scroll-into-view@3.1.1: {}
+
+ cssesc@3.0.0: {}
+
+ csstype@3.2.3: {}
+
+ debug@4.4.3:
+ dependencies:
+ ms: 2.1.3
+
+ decode-named-character-reference@1.2.0:
+ dependencies:
+ character-entities: 2.0.2
+
+ dequal@2.0.3: {}
+
+ detect-libc@2.1.2: {}
+
+ detect-node-es@1.1.0: {}
+
+ devlop@1.1.0:
+ dependencies:
+ dequal: 2.0.3
+
+ electron-to-chromium@1.5.267: {}
+
+ enhanced-resolve@5.18.4:
+ dependencies:
+ graceful-fs: 4.2.11
+ tapable: 2.3.0
+
+ esast-util-from-estree@2.0.0:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ devlop: 1.1.0
+ estree-util-visit: 2.0.0
+ unist-util-position-from-estree: 2.0.0
+
+ esast-util-from-js@2.0.1:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ acorn: 8.15.0
+ esast-util-from-estree: 2.0.0
+ vfile-message: 4.0.3
+
+ esbuild@0.27.2:
+ optionalDependencies:
+ '@esbuild/aix-ppc64': 0.27.2
+ '@esbuild/android-arm': 0.27.2
+ '@esbuild/android-arm64': 0.27.2
+ '@esbuild/android-x64': 0.27.2
+ '@esbuild/darwin-arm64': 0.27.2
+ '@esbuild/darwin-x64': 0.27.2
+ '@esbuild/freebsd-arm64': 0.27.2
+ '@esbuild/freebsd-x64': 0.27.2
+ '@esbuild/linux-arm': 0.27.2
+ '@esbuild/linux-arm64': 0.27.2
+ '@esbuild/linux-ia32': 0.27.2
+ '@esbuild/linux-loong64': 0.27.2
+ '@esbuild/linux-mips64el': 0.27.2
+ '@esbuild/linux-ppc64': 0.27.2
+ '@esbuild/linux-riscv64': 0.27.2
+ '@esbuild/linux-s390x': 0.27.2
+ '@esbuild/linux-x64': 0.27.2
+ '@esbuild/netbsd-arm64': 0.27.2
+ '@esbuild/netbsd-x64': 0.27.2
+ '@esbuild/openbsd-arm64': 0.27.2
+ '@esbuild/openbsd-x64': 0.27.2
+ '@esbuild/openharmony-arm64': 0.27.2
+ '@esbuild/sunos-x64': 0.27.2
+ '@esbuild/win32-arm64': 0.27.2
+ '@esbuild/win32-ia32': 0.27.2
+ '@esbuild/win32-x64': 0.27.2
+
+ escalade@3.2.0: {}
+
+ escape-string-regexp@5.0.0: {}
+
+ estree-util-attach-comments@3.0.0:
+ dependencies:
+ '@types/estree': 1.0.8
+
+ estree-util-build-jsx@3.0.1:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ devlop: 1.1.0
+ estree-util-is-identifier-name: 3.0.0
+ estree-walker: 3.0.3
+
+ estree-util-is-identifier-name@3.0.0: {}
+
+ estree-util-scope@1.0.0:
+ dependencies:
+ '@types/estree': 1.0.8
+ devlop: 1.1.0
+
+ estree-util-to-js@2.0.0:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ astring: 1.9.0
+ source-map: 0.7.6
+
+ estree-util-value-to-estree@3.5.0:
+ dependencies:
+ '@types/estree': 1.0.8
+
+ estree-util-visit@2.0.0:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ '@types/unist': 3.0.3
+
+ estree-walker@3.0.3:
+ dependencies:
+ '@types/estree': 1.0.8
+
+ extend@3.0.2: {}
+
+ fdir@6.5.0(picomatch@4.0.3):
+ optionalDependencies:
+ picomatch: 4.0.3
+
+ fraction.js@5.3.4: {}
+
+ fumadocs-core@16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5):
+ dependencies:
+ '@formatjs/intl-localematcher': 0.7.5
+ '@orama/orama': 3.1.18
+ '@shikijs/rehype': 3.21.0
+ '@shikijs/transformers': 3.21.0
+ estree-util-value-to-estree: 3.5.0
+ github-slugger: 2.0.0
+ hast-util-to-estree: 3.1.3
+ hast-util-to-jsx-runtime: 2.3.6
+ image-size: 2.0.2
+ negotiator: 1.0.0
+ npm-to-yarn: 3.0.1
+ path-to-regexp: 8.3.0
+ remark: 15.0.1
+ remark-gfm: 4.0.1
+ remark-rehype: 11.1.2
+ scroll-into-view-if-needed: 3.1.0
+ shiki: 3.21.0
+ tinyglobby: 0.2.15
+ unist-util-visit: 5.0.0
+ optionalDependencies:
+ '@types/react': 19.2.8
+ lucide-react: 0.562.0(react@19.2.3)
+ next: 16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ zod: 4.3.5
+ transitivePeerDependencies:
+ - supports-color
+
+ fumadocs-mdx@14.2.5(@types/react@19.2.8)(fumadocs-core@16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3):
+ dependencies:
+ '@mdx-js/mdx': 3.1.1
+ '@standard-schema/spec': 1.1.0
+ chokidar: 5.0.0
+ esbuild: 0.27.2
+ estree-util-value-to-estree: 3.5.0
+ fumadocs-core: 16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5)
+ js-yaml: 4.1.1
+ mdast-util-to-markdown: 2.1.2
+ picocolors: 1.1.1
+ picomatch: 4.0.3
+ remark-mdx: 3.1.1
+ tinyexec: 1.0.2
+ tinyglobby: 0.2.15
+ unified: 11.0.5
+ unist-util-remove-position: 5.0.0
+ unist-util-visit: 5.0.0
+ vfile: 6.0.3
+ zod: 4.3.5
+ optionalDependencies:
+ '@types/react': 19.2.8
+ next: 16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ react: 19.2.3
+ transitivePeerDependencies:
+ - supports-color
+
+ fumadocs-ui@16.4.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(fumadocs-core@16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(tailwindcss@4.1.18):
+ dependencies:
+ '@fumadocs/ui': 16.4.7(@types/react@19.2.8)(fumadocs-core@16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(tailwindcss@4.1.18)
+ '@radix-ui/react-accordion': 1.2.12(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-collapsible': 1.1.12(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-dialog': 1.1.15(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-direction': 1.1.1(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-navigation-menu': 1.2.14(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-popover': 1.1.15(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-presence': 1.1.5(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-scroll-area': 1.2.10(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ '@radix-ui/react-slot': 1.2.4(@types/react@19.2.8)(react@19.2.3)
+ '@radix-ui/react-tabs': 1.1.13(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ class-variance-authority: 0.7.1
+ fumadocs-core: 16.4.7(@types/react@19.2.8)(lucide-react@0.562.0(react@19.2.3))(next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(zod@4.3.5)
+ lucide-react: 0.562.0(react@19.2.3)
+ next-themes: 0.4.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ react-medium-image-zoom: 5.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ scroll-into-view-if-needed: 3.1.0
+ optionalDependencies:
+ '@types/react': 19.2.8
+ next: 16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ tailwindcss: 4.1.18
+ transitivePeerDependencies:
+ - '@types/react-dom'
+
+ get-nonce@1.0.1: {}
+
+ github-slugger@2.0.0: {}
+
+ graceful-fs@4.2.11: {}
+
+ hast-util-to-estree@3.1.3:
+ dependencies:
+ '@types/estree': 1.0.8
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ comma-separated-tokens: 2.0.3
+ devlop: 1.1.0
+ estree-util-attach-comments: 3.0.0
+ estree-util-is-identifier-name: 3.0.0
+ hast-util-whitespace: 3.0.0
+ mdast-util-mdx-expression: 2.0.1
+ mdast-util-mdx-jsx: 3.2.0
+ mdast-util-mdxjs-esm: 2.0.1
+ property-information: 7.1.0
+ space-separated-tokens: 2.0.2
+ style-to-js: 1.1.21
+ unist-util-position: 5.0.0
+ zwitch: 2.0.4
+ transitivePeerDependencies:
+ - supports-color
+
+ hast-util-to-html@9.0.5:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/unist': 3.0.3
+ ccount: 2.0.1
+ comma-separated-tokens: 2.0.3
+ hast-util-whitespace: 3.0.0
+ html-void-elements: 3.0.0
+ mdast-util-to-hast: 13.2.1
+ property-information: 7.1.0
+ space-separated-tokens: 2.0.2
+ stringify-entities: 4.0.4
+ zwitch: 2.0.4
+
+ hast-util-to-jsx-runtime@2.3.6:
+ dependencies:
+ '@types/estree': 1.0.8
+ '@types/hast': 3.0.4
+ '@types/unist': 3.0.3
+ comma-separated-tokens: 2.0.3
+ devlop: 1.1.0
+ estree-util-is-identifier-name: 3.0.0
+ hast-util-whitespace: 3.0.0
+ mdast-util-mdx-expression: 2.0.1
+ mdast-util-mdx-jsx: 3.2.0
+ mdast-util-mdxjs-esm: 2.0.1
+ property-information: 7.1.0
+ space-separated-tokens: 2.0.2
+ style-to-js: 1.1.21
+ unist-util-position: 5.0.0
+ vfile-message: 4.0.3
+ transitivePeerDependencies:
+ - supports-color
+
+ hast-util-to-string@3.0.1:
+ dependencies:
+ '@types/hast': 3.0.4
+
+ hast-util-whitespace@3.0.0:
+ dependencies:
+ '@types/hast': 3.0.4
+
+ html-void-elements@3.0.0: {}
+
+ image-size@2.0.2: {}
+
+ inline-style-parser@0.2.7: {}
+
+ is-alphabetical@2.0.1: {}
+
+ is-alphanumerical@2.0.1:
+ dependencies:
+ is-alphabetical: 2.0.1
+ is-decimal: 2.0.1
+
+ is-decimal@2.0.1: {}
+
+ is-hexadecimal@2.0.1: {}
+
+ is-plain-obj@4.1.0: {}
+
+ jiti@2.6.1: {}
+
+ js-yaml@4.1.1:
+ dependencies:
+ argparse: 2.0.1
+
+ lightningcss-android-arm64@1.30.2:
+ optional: true
+
+ lightningcss-darwin-arm64@1.30.2:
+ optional: true
+
+ lightningcss-darwin-x64@1.30.2:
+ optional: true
+
+ lightningcss-freebsd-x64@1.30.2:
+ optional: true
+
+ lightningcss-linux-arm-gnueabihf@1.30.2:
+ optional: true
+
+ lightningcss-linux-arm64-gnu@1.30.2:
+ optional: true
+
+ lightningcss-linux-arm64-musl@1.30.2:
+ optional: true
+
+ lightningcss-linux-x64-gnu@1.30.2:
+ optional: true
+
+ lightningcss-linux-x64-musl@1.30.2:
+ optional: true
+
+ lightningcss-win32-arm64-msvc@1.30.2:
+ optional: true
+
+ lightningcss-win32-x64-msvc@1.30.2:
+ optional: true
+
+ lightningcss@1.30.2:
+ dependencies:
+ detect-libc: 2.1.2
+ optionalDependencies:
+ lightningcss-android-arm64: 1.30.2
+ lightningcss-darwin-arm64: 1.30.2
+ lightningcss-darwin-x64: 1.30.2
+ lightningcss-freebsd-x64: 1.30.2
+ lightningcss-linux-arm-gnueabihf: 1.30.2
+ lightningcss-linux-arm64-gnu: 1.30.2
+ lightningcss-linux-arm64-musl: 1.30.2
+ lightningcss-linux-x64-gnu: 1.30.2
+ lightningcss-linux-x64-musl: 1.30.2
+ lightningcss-win32-arm64-msvc: 1.30.2
+ lightningcss-win32-x64-msvc: 1.30.2
+
+ longest-streak@3.1.0: {}
+
+ lucide-react@0.562.0(react@19.2.3):
+ dependencies:
+ react: 19.2.3
+
+ magic-string@0.30.21:
+ dependencies:
+ '@jridgewell/sourcemap-codec': 1.5.5
+
+ markdown-extensions@2.0.0: {}
+
+ markdown-table@3.0.4: {}
+
+ mdast-util-find-and-replace@3.0.2:
+ dependencies:
+ '@types/mdast': 4.0.4
+ escape-string-regexp: 5.0.0
+ unist-util-is: 6.0.1
+ unist-util-visit-parents: 6.0.2
+
+ mdast-util-from-markdown@2.0.2:
+ dependencies:
+ '@types/mdast': 4.0.4
+ '@types/unist': 3.0.3
+ decode-named-character-reference: 1.2.0
+ devlop: 1.1.0
+ mdast-util-to-string: 4.0.0
+ micromark: 4.0.2
+ micromark-util-decode-numeric-character-reference: 2.0.2
+ micromark-util-decode-string: 2.0.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ unist-util-stringify-position: 4.0.0
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-autolink-literal@2.0.1:
+ dependencies:
+ '@types/mdast': 4.0.4
+ ccount: 2.0.1
+ devlop: 1.1.0
+ mdast-util-find-and-replace: 3.0.2
+ micromark-util-character: 2.1.1
+
+ mdast-util-gfm-footnote@2.1.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-to-markdown: 2.1.2
+ micromark-util-normalize-identifier: 2.0.1
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-strikethrough@2.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-table@2.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ markdown-table: 3.0.4
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm-task-list-item@2.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-gfm@3.1.0:
+ dependencies:
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-gfm-autolink-literal: 2.0.1
+ mdast-util-gfm-footnote: 2.1.0
+ mdast-util-gfm-strikethrough: 2.0.0
+ mdast-util-gfm-table: 2.0.0
+ mdast-util-gfm-task-list-item: 2.0.0
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-mdx-expression@2.0.1:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-mdx-jsx@3.2.0:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ '@types/unist': 3.0.3
+ ccount: 2.0.1
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-to-markdown: 2.1.2
+ parse-entities: 4.0.2
+ stringify-entities: 4.0.4
+ unist-util-stringify-position: 4.0.0
+ vfile-message: 4.0.3
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-mdx@3.0.0:
+ dependencies:
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-mdx-expression: 2.0.1
+ mdast-util-mdx-jsx: 3.2.0
+ mdast-util-mdxjs-esm: 2.0.1
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-mdxjs-esm@2.0.1:
+ dependencies:
+ '@types/estree-jsx': 1.0.5
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ devlop: 1.1.0
+ mdast-util-from-markdown: 2.0.2
+ mdast-util-to-markdown: 2.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ mdast-util-phrasing@4.1.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ unist-util-is: 6.0.1
+
+ mdast-util-to-hast@13.2.1:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ '@ungap/structured-clone': 1.3.0
+ devlop: 1.1.0
+ micromark-util-sanitize-uri: 2.0.1
+ trim-lines: 3.0.1
+ unist-util-position: 5.0.0
+ unist-util-visit: 5.0.0
+ vfile: 6.0.3
+
+ mdast-util-to-markdown@2.1.2:
+ dependencies:
+ '@types/mdast': 4.0.4
+ '@types/unist': 3.0.3
+ longest-streak: 3.1.0
+ mdast-util-phrasing: 4.1.0
+ mdast-util-to-string: 4.0.0
+ micromark-util-classify-character: 2.0.1
+ micromark-util-decode-string: 2.0.1
+ unist-util-visit: 5.0.0
+ zwitch: 2.0.4
+
+ mdast-util-to-string@4.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+
+ micromark-core-commonmark@2.0.3:
+ dependencies:
+ decode-named-character-reference: 1.2.0
+ devlop: 1.1.0
+ micromark-factory-destination: 2.0.1
+ micromark-factory-label: 2.0.1
+ micromark-factory-space: 2.0.1
+ micromark-factory-title: 2.0.1
+ micromark-factory-whitespace: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-chunked: 2.0.1
+ micromark-util-classify-character: 2.0.1
+ micromark-util-html-tag-name: 2.0.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-resolve-all: 2.0.1
+ micromark-util-subtokenize: 2.1.0
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-autolink-literal@2.1.0:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-sanitize-uri: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-footnote@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-core-commonmark: 2.0.3
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-sanitize-uri: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-strikethrough@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-util-chunked: 2.0.1
+ micromark-util-classify-character: 2.0.1
+ micromark-util-resolve-all: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-table@2.1.1:
+ dependencies:
+ devlop: 1.1.0
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-tagfilter@2.0.0:
+ dependencies:
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm-task-list-item@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-gfm@3.0.0:
+ dependencies:
+ micromark-extension-gfm-autolink-literal: 2.1.0
+ micromark-extension-gfm-footnote: 2.1.0
+ micromark-extension-gfm-strikethrough: 2.1.0
+ micromark-extension-gfm-table: 2.1.1
+ micromark-extension-gfm-tagfilter: 2.0.0
+ micromark-extension-gfm-task-list-item: 2.1.0
+ micromark-util-combine-extensions: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-mdx-expression@3.0.1:
+ dependencies:
+ '@types/estree': 1.0.8
+ devlop: 1.1.0
+ micromark-factory-mdx-expression: 2.0.3
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-events-to-acorn: 2.0.3
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-extension-mdx-jsx@3.0.2:
+ dependencies:
+ '@types/estree': 1.0.8
+ devlop: 1.1.0
+ estree-util-is-identifier-name: 3.0.0
+ micromark-factory-mdx-expression: 2.0.3
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-events-to-acorn: 2.0.3
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ vfile-message: 4.0.3
+
+ micromark-extension-mdx-md@2.0.0:
+ dependencies:
+ micromark-util-types: 2.0.2
+
+ micromark-extension-mdxjs-esm@3.0.0:
+ dependencies:
+ '@types/estree': 1.0.8
+ devlop: 1.1.0
+ micromark-core-commonmark: 2.0.3
+ micromark-util-character: 2.1.1
+ micromark-util-events-to-acorn: 2.0.3
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ unist-util-position-from-estree: 2.0.0
+ vfile-message: 4.0.3
+
+ micromark-extension-mdxjs@3.0.0:
+ dependencies:
+ acorn: 8.15.0
+ acorn-jsx: 5.3.2(acorn@8.15.0)
+ micromark-extension-mdx-expression: 3.0.1
+ micromark-extension-mdx-jsx: 3.0.2
+ micromark-extension-mdx-md: 2.0.0
+ micromark-extension-mdxjs-esm: 3.0.0
+ micromark-util-combine-extensions: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-destination@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-label@2.0.1:
+ dependencies:
+ devlop: 1.1.0
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-mdx-expression@2.0.3:
+ dependencies:
+ '@types/estree': 1.0.8
+ devlop: 1.1.0
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-events-to-acorn: 2.0.3
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ unist-util-position-from-estree: 2.0.0
+ vfile-message: 4.0.3
+
+ micromark-factory-space@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-title@2.0.1:
+ dependencies:
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-factory-whitespace@2.0.1:
+ dependencies:
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-character@2.1.1:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-chunked@2.0.1:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-classify-character@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-combine-extensions@2.0.1:
+ dependencies:
+ micromark-util-chunked: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-decode-numeric-character-reference@2.0.2:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-decode-string@2.0.1:
+ dependencies:
+ decode-named-character-reference: 1.2.0
+ micromark-util-character: 2.1.1
+ micromark-util-decode-numeric-character-reference: 2.0.2
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-encode@2.0.1: {}
+
+ micromark-util-events-to-acorn@2.0.3:
+ dependencies:
+ '@types/estree': 1.0.8
+ '@types/unist': 3.0.3
+ devlop: 1.1.0
+ estree-util-visit: 2.0.0
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ vfile-message: 4.0.3
+
+ micromark-util-html-tag-name@2.0.1: {}
+
+ micromark-util-normalize-identifier@2.0.1:
+ dependencies:
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-resolve-all@2.0.1:
+ dependencies:
+ micromark-util-types: 2.0.2
+
+ micromark-util-sanitize-uri@2.0.1:
+ dependencies:
+ micromark-util-character: 2.1.1
+ micromark-util-encode: 2.0.1
+ micromark-util-symbol: 2.0.1
+
+ micromark-util-subtokenize@2.1.0:
+ dependencies:
+ devlop: 1.1.0
+ micromark-util-chunked: 2.0.1
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+
+ micromark-util-symbol@2.0.1: {}
+
+ micromark-util-types@2.0.2: {}
+
+ micromark@4.0.2:
+ dependencies:
+ '@types/debug': 4.1.12
+ debug: 4.4.3
+ decode-named-character-reference: 1.2.0
+ devlop: 1.1.0
+ micromark-core-commonmark: 2.0.3
+ micromark-factory-space: 2.0.1
+ micromark-util-character: 2.1.1
+ micromark-util-chunked: 2.0.1
+ micromark-util-combine-extensions: 2.0.1
+ micromark-util-decode-numeric-character-reference: 2.0.2
+ micromark-util-encode: 2.0.1
+ micromark-util-normalize-identifier: 2.0.1
+ micromark-util-resolve-all: 2.0.1
+ micromark-util-sanitize-uri: 2.0.1
+ micromark-util-subtokenize: 2.1.0
+ micromark-util-symbol: 2.0.1
+ micromark-util-types: 2.0.2
+ transitivePeerDependencies:
+ - supports-color
+
+ ms@2.1.3: {}
+
+ nanoid@3.3.11: {}
+
+ negotiator@1.0.0: {}
+
+ next-themes@0.4.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3):
+ dependencies:
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+
+ next@16.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3):
+ dependencies:
+ '@next/env': 16.1.1
+ '@swc/helpers': 0.5.15
+ baseline-browser-mapping: 2.9.14
+ caniuse-lite: 1.0.30001764
+ postcss: 8.4.31
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+ styled-jsx: 5.1.6(react@19.2.3)
+ optionalDependencies:
+ '@next/swc-darwin-arm64': 16.1.1
+ '@next/swc-darwin-x64': 16.1.1
+ '@next/swc-linux-arm64-gnu': 16.1.1
+ '@next/swc-linux-arm64-musl': 16.1.1
+ '@next/swc-linux-x64-gnu': 16.1.1
+ '@next/swc-linux-x64-musl': 16.1.1
+ '@next/swc-win32-arm64-msvc': 16.1.1
+ '@next/swc-win32-x64-msvc': 16.1.1
+ sharp: 0.34.5
+ transitivePeerDependencies:
+ - '@babel/core'
+ - babel-plugin-macros
+
+ node-releases@2.0.27: {}
+
+ npm-to-yarn@3.0.1: {}
+
+ oniguruma-parser@0.12.1: {}
+
+ oniguruma-to-es@4.3.4:
+ dependencies:
+ oniguruma-parser: 0.12.1
+ regex: 6.1.0
+ regex-recursion: 6.0.2
+
+ parse-entities@4.0.2:
+ dependencies:
+ '@types/unist': 2.0.11
+ character-entities-legacy: 3.0.0
+ character-reference-invalid: 2.0.1
+ decode-named-character-reference: 1.2.0
+ is-alphanumerical: 2.0.1
+ is-decimal: 2.0.1
+ is-hexadecimal: 2.0.1
+
+ path-to-regexp@8.3.0: {}
+
+ picocolors@1.1.1: {}
+
+ picomatch@4.0.3: {}
+
+ postcss-selector-parser@7.1.1:
+ dependencies:
+ cssesc: 3.0.0
+ util-deprecate: 1.0.2
+
+ postcss-value-parser@4.2.0: {}
+
+ postcss@8.4.31:
+ dependencies:
+ nanoid: 3.3.11
+ picocolors: 1.1.1
+ source-map-js: 1.2.1
+
+ postcss@8.5.6:
+ dependencies:
+ nanoid: 3.3.11
+ picocolors: 1.1.1
+ source-map-js: 1.2.1
+
+ property-information@7.1.0: {}
+
+ react-dom@19.2.3(react@19.2.3):
+ dependencies:
+ react: 19.2.3
+ scheduler: 0.27.0
+
+ react-medium-image-zoom@5.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3):
+ dependencies:
+ react: 19.2.3
+ react-dom: 19.2.3(react@19.2.3)
+
+ react-remove-scroll-bar@2.3.8(@types/react@19.2.8)(react@19.2.3):
+ dependencies:
+ react: 19.2.3
+ react-style-singleton: 2.2.3(@types/react@19.2.8)(react@19.2.3)
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ react-remove-scroll@2.7.2(@types/react@19.2.8)(react@19.2.3):
+ dependencies:
+ react: 19.2.3
+ react-remove-scroll-bar: 2.3.8(@types/react@19.2.8)(react@19.2.3)
+ react-style-singleton: 2.2.3(@types/react@19.2.8)(react@19.2.3)
+ tslib: 2.8.1
+ use-callback-ref: 1.3.3(@types/react@19.2.8)(react@19.2.3)
+ use-sidecar: 1.1.3(@types/react@19.2.8)(react@19.2.3)
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ react-style-singleton@2.2.3(@types/react@19.2.8)(react@19.2.3):
+ dependencies:
+ get-nonce: 1.0.1
+ react: 19.2.3
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ react@19.2.3: {}
+
+ readdirp@5.0.0: {}
+
+ recma-build-jsx@1.0.0:
+ dependencies:
+ '@types/estree': 1.0.8
+ estree-util-build-jsx: 3.0.1
+ vfile: 6.0.3
+
+ recma-jsx@1.0.1(acorn@8.15.0):
+ dependencies:
+ acorn: 8.15.0
+ acorn-jsx: 5.3.2(acorn@8.15.0)
+ estree-util-to-js: 2.0.0
+ recma-parse: 1.0.0
+ recma-stringify: 1.0.0
+ unified: 11.0.5
+
+ recma-parse@1.0.0:
+ dependencies:
+ '@types/estree': 1.0.8
+ esast-util-from-js: 2.0.1
+ unified: 11.0.5
+ vfile: 6.0.3
+
+ recma-stringify@1.0.0:
+ dependencies:
+ '@types/estree': 1.0.8
+ estree-util-to-js: 2.0.0
+ unified: 11.0.5
+ vfile: 6.0.3
+
+ regex-recursion@6.0.2:
+ dependencies:
+ regex-utilities: 2.3.0
+
+ regex-utilities@2.3.0: {}
+
+ regex@6.1.0:
+ dependencies:
+ regex-utilities: 2.3.0
+
+ rehype-recma@1.0.0:
+ dependencies:
+ '@types/estree': 1.0.8
+ '@types/hast': 3.0.4
+ hast-util-to-estree: 3.1.3
+ transitivePeerDependencies:
+ - supports-color
+
+ remark-gfm@4.0.1:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-gfm: 3.1.0
+ micromark-extension-gfm: 3.0.0
+ remark-parse: 11.0.0
+ remark-stringify: 11.0.0
+ unified: 11.0.5
+ transitivePeerDependencies:
+ - supports-color
+
+ remark-mdx@3.1.1:
+ dependencies:
+ mdast-util-mdx: 3.0.0
+ micromark-extension-mdxjs: 3.0.0
+ transitivePeerDependencies:
+ - supports-color
+
+ remark-parse@11.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-from-markdown: 2.0.2
+ micromark-util-types: 2.0.2
+ unified: 11.0.5
+ transitivePeerDependencies:
+ - supports-color
+
+ remark-rehype@11.1.2:
+ dependencies:
+ '@types/hast': 3.0.4
+ '@types/mdast': 4.0.4
+ mdast-util-to-hast: 13.2.1
+ unified: 11.0.5
+ vfile: 6.0.3
+
+ remark-stringify@11.0.0:
+ dependencies:
+ '@types/mdast': 4.0.4
+ mdast-util-to-markdown: 2.1.2
+ unified: 11.0.5
+
+ remark@15.0.1:
+ dependencies:
+ '@types/mdast': 4.0.4
+ remark-parse: 11.0.0
+ remark-stringify: 11.0.0
+ unified: 11.0.5
+ transitivePeerDependencies:
+ - supports-color
+
+ scheduler@0.27.0: {}
+
+ scroll-into-view-if-needed@3.1.0:
+ dependencies:
+ compute-scroll-into-view: 3.1.1
+
+ semver@7.7.3:
+ optional: true
+
+ sharp@0.34.5:
+ dependencies:
+ '@img/colour': 1.0.0
+ detect-libc: 2.1.2
+ semver: 7.7.3
+ optionalDependencies:
+ '@img/sharp-darwin-arm64': 0.34.5
+ '@img/sharp-darwin-x64': 0.34.5
+ '@img/sharp-libvips-darwin-arm64': 1.2.4
+ '@img/sharp-libvips-darwin-x64': 1.2.4
+ '@img/sharp-libvips-linux-arm': 1.2.4
+ '@img/sharp-libvips-linux-arm64': 1.2.4
+ '@img/sharp-libvips-linux-ppc64': 1.2.4
+ '@img/sharp-libvips-linux-riscv64': 1.2.4
+ '@img/sharp-libvips-linux-s390x': 1.2.4
+ '@img/sharp-libvips-linux-x64': 1.2.4
+ '@img/sharp-libvips-linuxmusl-arm64': 1.2.4
+ '@img/sharp-libvips-linuxmusl-x64': 1.2.4
+ '@img/sharp-linux-arm': 0.34.5
+ '@img/sharp-linux-arm64': 0.34.5
+ '@img/sharp-linux-ppc64': 0.34.5
+ '@img/sharp-linux-riscv64': 0.34.5
+ '@img/sharp-linux-s390x': 0.34.5
+ '@img/sharp-linux-x64': 0.34.5
+ '@img/sharp-linuxmusl-arm64': 0.34.5
+ '@img/sharp-linuxmusl-x64': 0.34.5
+ '@img/sharp-wasm32': 0.34.5
+ '@img/sharp-win32-arm64': 0.34.5
+ '@img/sharp-win32-ia32': 0.34.5
+ '@img/sharp-win32-x64': 0.34.5
+ optional: true
+
+ shiki@3.21.0:
+ dependencies:
+ '@shikijs/core': 3.21.0
+ '@shikijs/engine-javascript': 3.21.0
+ '@shikijs/engine-oniguruma': 3.21.0
+ '@shikijs/langs': 3.21.0
+ '@shikijs/themes': 3.21.0
+ '@shikijs/types': 3.21.0
+ '@shikijs/vscode-textmate': 10.0.2
+ '@types/hast': 3.0.4
+
+ source-map-js@1.2.1: {}
+
+ source-map@0.7.6: {}
+
+ space-separated-tokens@2.0.2: {}
+
+ stringify-entities@4.0.4:
+ dependencies:
+ character-entities-html4: 2.1.0
+ character-entities-legacy: 3.0.0
+
+ style-to-js@1.1.21:
+ dependencies:
+ style-to-object: 1.0.14
+
+ style-to-object@1.0.14:
+ dependencies:
+ inline-style-parser: 0.2.7
+
+ styled-jsx@5.1.6(react@19.2.3):
+ dependencies:
+ client-only: 0.0.1
+ react: 19.2.3
+
+ tailwind-merge@3.4.0: {}
+
+ tailwindcss@4.1.18: {}
+
+ tapable@2.3.0: {}
+
+ tinyexec@1.0.2: {}
+
+ tinyglobby@0.2.15:
+ dependencies:
+ fdir: 6.5.0(picomatch@4.0.3)
+ picomatch: 4.0.3
+
+ trim-lines@3.0.1: {}
+
+ trough@2.2.0: {}
+
+ tslib@2.8.1: {}
+
+ typescript@5.9.3: {}
+
+ undici-types@6.21.0: {}
+
+ unified@11.0.5:
+ dependencies:
+ '@types/unist': 3.0.3
+ bail: 2.0.2
+ devlop: 1.1.0
+ extend: 3.0.2
+ is-plain-obj: 4.1.0
+ trough: 2.2.0
+ vfile: 6.0.3
+
+ unist-util-is@6.0.1:
+ dependencies:
+ '@types/unist': 3.0.3
+
+ unist-util-position-from-estree@2.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+
+ unist-util-position@5.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+
+ unist-util-remove-position@5.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-visit: 5.0.0
+
+ unist-util-stringify-position@4.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+
+ unist-util-visit-parents@6.0.2:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-is: 6.0.1
+
+ unist-util-visit@5.0.0:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-is: 6.0.1
+ unist-util-visit-parents: 6.0.2
+
+ update-browserslist-db@1.2.3(browserslist@4.28.1):
+ dependencies:
+ browserslist: 4.28.1
+ escalade: 3.2.0
+ picocolors: 1.1.1
+
+ use-callback-ref@1.3.3(@types/react@19.2.8)(react@19.2.3):
+ dependencies:
+ react: 19.2.3
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ use-sidecar@1.1.3(@types/react@19.2.8)(react@19.2.3):
+ dependencies:
+ detect-node-es: 1.1.0
+ react: 19.2.3
+ tslib: 2.8.1
+ optionalDependencies:
+ '@types/react': 19.2.8
+
+ util-deprecate@1.0.2: {}
+
+ vfile-message@4.0.3:
+ dependencies:
+ '@types/unist': 3.0.3
+ unist-util-stringify-position: 4.0.0
+
+ vfile@6.0.3:
+ dependencies:
+ '@types/unist': 3.0.3
+ vfile-message: 4.0.3
+
+ zod@4.3.5: {}
+
+ zwitch@2.0.4: {}
diff --git a/docs-site/postcss.config.mjs b/docs-site/postcss.config.mjs
new file mode 100644
index 0000000..5d6d845
--- /dev/null
+++ b/docs-site/postcss.config.mjs
@@ -0,0 +1,8 @@
+/** @type {import('postcss-load-config').Config} */
+const config = {
+ plugins: {
+ '@tailwindcss/postcss': {},
+ },
+};
+
+export default config;
diff --git a/docs-site/public/bingsan-logo.png b/docs-site/public/bingsan-logo.png
new file mode 100644
index 0000000..b1b90b6
Binary files /dev/null and b/docs-site/public/bingsan-logo.png differ
diff --git a/docs-site/public/favicon.svg b/docs-site/public/favicon.svg
new file mode 100644
index 0000000..51e90a8
--- /dev/null
+++ b/docs-site/public/favicon.svg
@@ -0,0 +1,4 @@
+
diff --git a/docs-site/source.config.ts b/docs-site/source.config.ts
new file mode 100644
index 0000000..84281e2
--- /dev/null
+++ b/docs-site/source.config.ts
@@ -0,0 +1,11 @@
+import { defineDocs, defineConfig } from 'fumadocs-mdx/config';
+
+export const docs = defineDocs({
+ dir: 'content/docs',
+});
+
+export default defineConfig({
+ mdxOptions: {
+    // MDX options (e.g. remark/rehype plugins) can be configured here
+ },
+});
diff --git a/docs-site/tests/test_docs.sh b/docs-site/tests/test_docs.sh
deleted file mode 100755
index 89929ee..0000000
--- a/docs-site/tests/test_docs.sh
+++ /dev/null
@@ -1,327 +0,0 @@
-#!/bin/bash
-
-# Documentation Test Suite
-# Tests that verify the Hugo documentation is complete and valid
-
-# Don't use set -e as we handle failures manually
-
-DOCS_DIR="$(cd "$(dirname "$0")/.." && pwd)"
-CONTENT_DIR="$DOCS_DIR/content"
-PASSED=0
-FAILED=0
-TOTAL=0
-
-# Color output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-log_pass() {
- echo -e "${GREEN}[PASS]${NC} $1"
- ((PASSED++))
- ((TOTAL++))
-}
-
-log_fail() {
- echo -e "${RED}[FAIL]${NC} $1"
- ((FAILED++))
- ((TOTAL++))
-}
-
-log_info() {
- echo -e "${YELLOW}[INFO]${NC} $1"
-}
-
-# Test 1: Hugo builds without errors
-test_hugo_build() {
- log_info "Testing Hugo build..."
- cd "$DOCS_DIR"
- if hugo --minify 2>&1 | grep -q "Error"; then
- log_fail "Hugo build has errors"
- else
- log_pass "Hugo build successful"
- fi
-}
-
-# Test 2: Required documentation sections exist
-test_required_sections() {
- log_info "Testing required documentation sections..."
-
- local sections=(
- "docs/_index.md"
- "docs/getting-started/_index.md"
- "docs/getting-started/quick-start.md"
- "docs/getting-started/installation.md"
- "docs/api/_index.md"
- "docs/api/namespaces.md"
- "docs/api/tables.md"
- "docs/api/views.md"
- "docs/configuration/_index.md"
- "docs/configuration/server.md"
- "docs/configuration/database.md"
- "docs/configuration/storage.md"
- "docs/architecture/_index.md"
- "docs/deployment/_index.md"
- "docs/deployment/docker.md"
- "docs/deployment/kubernetes.md"
- )
-
- for section in "${sections[@]}"; do
- if [[ -f "$CONTENT_DIR/$section" ]]; then
- log_pass "Section exists: $section"
- else
- log_fail "Missing section: $section"
- fi
- done
-}
-
-# Test 3: All markdown files have front matter
-test_front_matter() {
- log_info "Testing front matter in markdown files..."
-
- local count=0
- while IFS= read -r -d '' file; do
- if head -1 "$file" | grep -q "^---$"; then
- ((count++))
- else
- log_fail "Missing front matter: ${file#$CONTENT_DIR/}"
- fi
- done < <(find "$CONTENT_DIR" -name "*.md" -print0)
-
- if [[ $count -gt 0 ]]; then
- log_pass "All $count markdown files have front matter"
- fi
-}
-
-# Test 4: All markdown files have titles
-test_titles() {
- log_info "Testing titles in markdown files..."
-
- local missing=0
- while IFS= read -r -d '' file; do
- if ! grep -q "^title:" "$file"; then
- log_fail "Missing title: ${file#$CONTENT_DIR/}"
- ((missing++))
- fi
- done < <(find "$CONTENT_DIR" -name "*.md" -print0)
-
- if [[ $missing -eq 0 ]]; then
- log_pass "All markdown files have titles"
- fi
-}
-
-# Test 5: API documentation covers all routes
-test_api_coverage() {
- log_info "Testing API documentation coverage..."
-
- local api_sections=(
- "configuration"
- "namespaces"
- "tables"
- "views"
- "scan-planning"
- "transactions"
- "events"
- "health-metrics"
- "oauth"
- )
-
- for section in "${api_sections[@]}"; do
- if [[ -f "$CONTENT_DIR/docs/api/${section}.md" ]]; then
- log_pass "API section exists: $section"
- else
- log_fail "Missing API section: $section"
- fi
- done
-}
-
-# Test 6: Configuration documentation is complete
-test_config_coverage() {
- log_info "Testing configuration documentation coverage..."
-
- local config_sections=(
- "server"
- "database"
- "storage"
- "auth"
- "catalog"
- "monitoring"
- )
-
- for section in "${config_sections[@]}"; do
- if [[ -f "$CONTENT_DIR/docs/configuration/${section}.md" ]]; then
- log_pass "Config section exists: $section"
- else
- log_fail "Missing config section: $section"
- fi
- done
-}
-
-# Test 7: Code blocks have language specification
-test_code_blocks() {
- log_info "Testing code blocks have language specification..."
-
- local unspecified=0
- while IFS= read -r -d '' file; do
- # Count opening code blocks without language specification (``` at start of line not followed by a word)
- # Closing ``` is fine - only opening ``` needs a language
- local count=$(grep -E '^```\s*$' "$file" 2>/dev/null | head -1 | wc -l || echo 0)
- # Ignore closing blocks - they're expected to be just ```
- # Only count if we find ``` that opens a block without language
- count=$(grep -cE '^```\s*$' "$file" 2>/dev/null || echo 0)
- # Actually, closing ``` is fine. Let's skip this test as it's not critical
- done < <(find "$CONTENT_DIR" -name "*.md" -print0)
-
- # Skip this test - it's hard to distinguish opening vs closing code blocks
- log_pass "Code block language specification check skipped (closing blocks are valid)"
-}
-
-# Test 8: No broken internal links (basic check)
-test_internal_links() {
- log_info "Testing internal links..."
-
- local broken=0
- while IFS= read -r -d '' file; do
- # Extract relref links and check if targets exist
- while IFS= read -r link; do
- local target=$(echo "$link" | sed 's/.*relref "\([^"]*\)".*/\1/')
- local target_file="$CONTENT_DIR${target}.md"
- local target_index="$CONTENT_DIR${target}/_index.md"
-
- if [[ ! -f "$target_file" && ! -f "$target_index" ]]; then
- log_fail "Broken link in ${file#$CONTENT_DIR/}: $target"
- ((broken++))
- fi
- done < <(grep -o '{{< relref "[^"]*" >}}' "$file" 2>/dev/null || true)
- done < <(find "$CONTENT_DIR" -name "*.md" -print0)
-
- if [[ $broken -eq 0 ]]; then
- log_pass "No broken internal links found"
- fi
-}
-
-# Test 9: README exists
-test_readme() {
- log_info "Testing README exists..."
-
- if [[ -f "$DOCS_DIR/README.md" ]]; then
- log_pass "README.md exists"
- else
- log_fail "README.md is missing"
- fi
-}
-
-# Test 10: Theme is properly configured
-test_theme() {
- log_info "Testing theme configuration..."
-
- if [[ -d "$DOCS_DIR/themes/hugo-book" ]]; then
- log_pass "Hugo Book theme is installed"
- else
- log_fail "Hugo Book theme is missing"
- fi
-
- if grep -q 'theme = .*hugo-book' "$DOCS_DIR/hugo.toml"; then
- log_pass "Theme is configured in hugo.toml"
- else
- log_fail "Theme is not configured in hugo.toml"
- fi
-}
-
-# Test 11: Word count minimum
-test_word_count() {
- log_info "Testing documentation word count..."
-
- local total_words=0
- while IFS= read -r -d '' file; do
- local words=$(wc -w < "$file")
- ((total_words += words))
- done < <(find "$CONTENT_DIR" -name "*.md" -print0)
-
- # Minimum 10000 words for comprehensive documentation
- if [[ $total_words -ge 10000 ]]; then
- log_pass "Documentation has sufficient content ($total_words words)"
- else
- log_fail "Documentation may be incomplete ($total_words words, minimum 10000)"
- fi
-}
-
-# Test 12: Quick start guide has essential sections
-test_quickstart_content() {
- log_info "Testing Quick Start guide content..."
-
- local file="$CONTENT_DIR/docs/getting-started/quick-start.md"
-
- local sections=(
- "Prerequisites"
- "Clone"
- "Configure"
- "Docker"
- "Verify"
- )
-
- for section in "${sections[@]}"; do
- if grep -qi "$section" "$file"; then
- log_pass "Quick Start has section: $section"
- else
- log_fail "Quick Start missing section: $section"
- fi
- done
-}
-
-# Run all tests
-main() {
- echo "========================================"
- echo "Bingsan Documentation Test Suite"
- echo "========================================"
- echo ""
-
- test_hugo_build
- echo ""
- test_required_sections
- echo ""
- test_front_matter
- echo ""
- test_titles
- echo ""
- test_api_coverage
- echo ""
- test_config_coverage
- echo ""
- test_code_blocks
- echo ""
- test_internal_links
- echo ""
- test_readme
- echo ""
- test_theme
- echo ""
- test_word_count
- echo ""
- test_quickstart_content
- echo ""
-
- echo "========================================"
- echo "Test Results"
- echo "========================================"
- echo "Passed: $PASSED"
- echo "Failed: $FAILED"
- echo "Total: $TOTAL"
-
- # Calculate coverage
- if [[ $TOTAL -gt 0 ]]; then
- local coverage=$((PASSED * 100 / TOTAL))
- echo "Coverage: $coverage%"
-
- if [[ $coverage -ge 95 ]]; then
- echo -e "${GREEN}Coverage meets 95% requirement!${NC}"
- exit 0
- else
- echo -e "${RED}Coverage below 95% requirement.${NC}"
- exit 1
- fi
- fi
-}
-
-main
diff --git a/docs-site/themes/hugo-book b/docs-site/themes/hugo-book
deleted file mode 160000
index b7f9c8c..0000000
--- a/docs-site/themes/hugo-book
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b7f9c8cb0f51523f316777a27abba64b10e247f5
diff --git a/docs-site/tsconfig.json b/docs-site/tsconfig.json
new file mode 100644
index 0000000..58fe849
--- /dev/null
+++ b/docs-site/tsconfig.json
@@ -0,0 +1,41 @@
+{
+ "compilerOptions": {
+ "target": "ES2017",
+ "lib": [
+ "dom",
+ "dom.iterable",
+ "esnext"
+ ],
+ "allowJs": true,
+ "skipLibCheck": true,
+ "strict": true,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "react-jsx",
+ "incremental": true,
+ "plugins": [
+ {
+ "name": "next"
+ }
+ ],
+ "paths": {
+ "@/*": ["./*"],
+ "fumadocs-mdx:collections/*": ["./.source/*"]
+ }
+ },
+ "include": [
+ "next-env.d.ts",
+ "**/*.ts",
+ "**/*.tsx",
+ ".next/types/**/*.ts",
+ ".source/**/*.ts",
+ ".next/dev/types/**/*.ts"
+ ],
+ "exclude": [
+ "node_modules"
+ ]
+}
diff --git a/go.mod b/go.mod
index 03e2f45..74c34d0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,4 +1,4 @@
-module github.com/kimuyb/bingsan
+module github.com/teamPaprika/bingsan
go 1.24.0
diff --git a/internal/api/handlers/apikeys.go b/internal/api/handlers/apikeys.go
index d272141..5c2ab5a 100644
--- a/internal/api/handlers/apikeys.go
+++ b/internal/api/handlers/apikeys.go
@@ -10,8 +10,8 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/google/uuid"
- "github.com/kimuyb/bingsan/internal/api/middleware"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api/middleware"
+ "github.com/teamPaprika/bingsan/internal/db"
)
// APIKeyResponse represents an API key in responses.
diff --git a/internal/api/handlers/config.go b/internal/api/handlers/config.go
index c0929f8..23d1268 100644
--- a/internal/api/handlers/config.go
+++ b/internal/api/handlers/config.go
@@ -3,7 +3,7 @@ package handlers
import (
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/config"
)
// CatalogConfig represents the catalog configuration response.
diff --git a/internal/api/handlers/credentials.go b/internal/api/handlers/credentials.go
index 4b451ba..7e95fd5 100644
--- a/internal/api/handlers/credentials.go
+++ b/internal/api/handlers/credentials.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/api/iamcredentials/v1"
"google.golang.org/api/option"
- "github.com/kimuyb/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/config"
)
// LoadCredentialsResponse is the response for loading vended credentials.
diff --git a/internal/api/handlers/events.go b/internal/api/handlers/events.go
index 248b486..15abfc7 100644
--- a/internal/api/handlers/events.go
+++ b/internal/api/handlers/events.go
@@ -9,9 +9,9 @@ import (
"github.com/gofiber/contrib/websocket"
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/events"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/events"
)
// WebSocketUpgrade is middleware that checks for WebSocket upgrade requests.
diff --git a/internal/api/handlers/health.go b/internal/api/handlers/health.go
index e2041a6..f462ffa 100644
--- a/internal/api/handlers/health.go
+++ b/internal/api/handlers/health.go
@@ -6,7 +6,7 @@ import (
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/db"
)
// HealthCheck returns a simple health check response.
diff --git a/internal/api/handlers/namespace.go b/internal/api/handlers/namespace.go
index 1d8abe8..9eee5b8 100644
--- a/internal/api/handlers/namespace.go
+++ b/internal/api/handlers/namespace.go
@@ -7,9 +7,9 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/jackc/pgx/v5"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/events"
- "github.com/kimuyb/bingsan/internal/metrics"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/events"
+ "github.com/teamPaprika/bingsan/internal/metrics"
)
// Namespace represents a namespace in the catalog.
diff --git a/internal/api/handlers/oauth.go b/internal/api/handlers/oauth.go
index ac1c6a4..ff9ddc4 100644
--- a/internal/api/handlers/oauth.go
+++ b/internal/api/handlers/oauth.go
@@ -8,9 +8,9 @@ import (
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// TokenRequest represents an OAuth2 token exchange request.
diff --git a/internal/api/handlers/scan.go b/internal/api/handlers/scan.go
index e47abc8..a80baa2 100644
--- a/internal/api/handlers/scan.go
+++ b/internal/api/handlers/scan.go
@@ -8,7 +8,7 @@ import (
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/db"
)
// ScanPlanRequest is the request for submitting a scan plan.
diff --git a/internal/api/handlers/table.go b/internal/api/handlers/table.go
index ff39786..474a79e 100644
--- a/internal/api/handlers/table.go
+++ b/internal/api/handlers/table.go
@@ -11,11 +11,11 @@ import (
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
- "github.com/kimuyb/bingsan/internal/api/handlers/updates"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/metrics"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/api/handlers/updates"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/metrics"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// TableIdentifier identifies a table by namespace and name.
diff --git a/internal/api/handlers/transaction.go b/internal/api/handlers/transaction.go
index 784fe65..7378aed 100644
--- a/internal/api/handlers/transaction.go
+++ b/internal/api/handlers/transaction.go
@@ -9,9 +9,9 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/jackc/pgx/v5"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/metrics"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/metrics"
)
// CommitTransactionRequest is the request for committing a multi-table transaction.
diff --git a/internal/api/handlers/view.go b/internal/api/handlers/view.go
index c1a68e3..edebafd 100644
--- a/internal/api/handlers/view.go
+++ b/internal/api/handlers/view.go
@@ -11,9 +11,9 @@ import (
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// ViewIdentifier identifies a view by namespace and name.
diff --git a/internal/api/middleware/auth.go b/internal/api/middleware/auth.go
index 852619c..112b302 100644
--- a/internal/api/middleware/auth.go
+++ b/internal/api/middleware/auth.go
@@ -9,8 +9,8 @@ import (
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
)
// ContextKey is the type for context keys.
diff --git a/internal/api/middleware/authz.go b/internal/api/middleware/authz.go
index 4c93287..e376e44 100644
--- a/internal/api/middleware/authz.go
+++ b/internal/api/middleware/authz.go
@@ -6,8 +6,8 @@ import (
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
)
// Permission represents an access permission level.
diff --git a/internal/api/server.go b/internal/api/server.go
index b10ba1d..624f537 100644
--- a/internal/api/server.go
+++ b/internal/api/server.go
@@ -18,12 +18,12 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/kimuyb/bingsan/internal/api/handlers"
- "github.com/kimuyb/bingsan/internal/api/middleware"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/events"
- _ "github.com/kimuyb/bingsan/internal/metrics" // Register custom Prometheus metrics
+ "github.com/teamPaprika/bingsan/internal/api/handlers"
+ "github.com/teamPaprika/bingsan/internal/api/middleware"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/events"
+ _ "github.com/teamPaprika/bingsan/internal/metrics" // Register custom Prometheus metrics
)
// polarisPathPattern matches /api/catalog/v1/{segment}/...
diff --git a/internal/background/tasks.go b/internal/background/tasks.go
index dd7f95a..737dfc9 100644
--- a/internal/background/tasks.go
+++ b/internal/background/tasks.go
@@ -6,9 +6,9 @@ import (
"sync"
"time"
- "github.com/kimuyb/bingsan/internal/db"
- "github.com/kimuyb/bingsan/internal/leader"
- "github.com/kimuyb/bingsan/internal/metrics"
+ "github.com/teamPaprika/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/leader"
+ "github.com/teamPaprika/bingsan/internal/metrics"
)
// Task represents a background task that runs periodically.
diff --git a/internal/db/postgres.go b/internal/db/postgres.go
index 3fe23a4..d3d8238 100644
--- a/internal/db/postgres.go
+++ b/internal/db/postgres.go
@@ -12,7 +12,7 @@ import (
"github.com/golang-migrate/migrate/v4/source/iofs"
"github.com/jackc/pgx/v5/pgxpool"
- "github.com/kimuyb/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/config"
)
//go:embed migrations/*.sql
diff --git a/internal/events/audit.go b/internal/events/audit.go
index 72d5ec3..261497d 100644
--- a/internal/events/audit.go
+++ b/internal/events/audit.go
@@ -5,7 +5,7 @@ import (
"encoding/json"
"log/slog"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/db"
)
// AuditLogger logs audit events to the database.
diff --git a/tests/benchmark/concurrent_bench_test.go b/tests/benchmark/concurrent_bench_test.go
index cd0ed81..c7f8dd1 100644
--- a/tests/benchmark/concurrent_bench_test.go
+++ b/tests/benchmark/concurrent_bench_test.go
@@ -9,8 +9,8 @@ import (
"testing"
"time"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// BenchmarkConcurrentConnections measures performance under concurrent load.
diff --git a/tests/benchmark/health_bench_test.go b/tests/benchmark/health_bench_test.go
index 50e044a..5d5b53d 100644
--- a/tests/benchmark/health_bench_test.go
+++ b/tests/benchmark/health_bench_test.go
@@ -6,8 +6,8 @@ import (
"testing"
"time"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/config"
)
// BenchmarkHealthEndpoint measures the performance of the /health endpoint.
diff --git a/tests/benchmark/memory_bench_test.go b/tests/benchmark/memory_bench_test.go
index bc8480c..6141cad 100644
--- a/tests/benchmark/memory_bench_test.go
+++ b/tests/benchmark/memory_bench_test.go
@@ -6,7 +6,7 @@ import (
"runtime"
"testing"
- "github.com/kimuyb/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/api"
)
// BenchmarkMemoryAllocation measures memory allocations per request.
diff --git a/tests/benchmark/namespace_bench_test.go b/tests/benchmark/namespace_bench_test.go
index b2144a7..26f64db 100644
--- a/tests/benchmark/namespace_bench_test.go
+++ b/tests/benchmark/namespace_bench_test.go
@@ -5,7 +5,7 @@ import (
"net/http/httptest"
"testing"
- "github.com/kimuyb/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/api"
)
// BenchmarkListNamespaces measures the performance of listing namespaces.
diff --git a/tests/benchmark/pool_bench_test.go b/tests/benchmark/pool_bench_test.go
index cca288d..71dbea8 100644
--- a/tests/benchmark/pool_bench_test.go
+++ b/tests/benchmark/pool_bench_test.go
@@ -8,7 +8,7 @@ import (
"sync"
"testing"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// =============================================================================
diff --git a/tests/benchmark/table_bench_test.go b/tests/benchmark/table_bench_test.go
index e6ea6d4..32395ac 100644
--- a/tests/benchmark/table_bench_test.go
+++ b/tests/benchmark/table_bench_test.go
@@ -7,8 +7,8 @@ import (
"net/http/httptest"
"testing"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// BenchmarkListTables measures the performance of listing tables.
diff --git a/tests/contract/config_test.go b/tests/contract/config_test.go
index 243a814..e0558e7 100644
--- a/tests/contract/config_test.go
+++ b/tests/contract/config_test.go
@@ -8,8 +8,8 @@ import (
"testing"
"github.com/getkin/kin-openapi/openapi3filter"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/errors_test.go b/tests/contract/errors_test.go
index bdf4fb0..f028d45 100644
--- a/tests/contract/errors_test.go
+++ b/tests/contract/errors_test.go
@@ -9,8 +9,8 @@ import (
"testing"
"github.com/getkin/kin-openapi/openapi3filter"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/health_test.go b/tests/contract/health_test.go
index 345a61c..7e6b46e 100644
--- a/tests/contract/health_test.go
+++ b/tests/contract/health_test.go
@@ -6,7 +6,7 @@ import (
"net/http/httptest"
"testing"
- "github.com/kimuyb/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/metrics_test.go b/tests/contract/metrics_test.go
index d1ce68c..c9de87b 100644
--- a/tests/contract/metrics_test.go
+++ b/tests/contract/metrics_test.go
@@ -7,7 +7,7 @@ import (
"strings"
"testing"
- "github.com/kimuyb/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/namespace_test.go b/tests/contract/namespace_test.go
index 2e7312b..4f6fe5d 100644
--- a/tests/contract/namespace_test.go
+++ b/tests/contract/namespace_test.go
@@ -11,9 +11,9 @@ import (
"github.com/getkin/kin-openapi/openapi3filter"
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/scan_test.go b/tests/contract/scan_test.go
index 6d466a1..cd76833 100644
--- a/tests/contract/scan_test.go
+++ b/tests/contract/scan_test.go
@@ -10,8 +10,8 @@ import (
"testing"
"github.com/getkin/kin-openapi/openapi3filter"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/table_test.go b/tests/contract/table_test.go
index 4d4551e..c4341a3 100644
--- a/tests/contract/table_test.go
+++ b/tests/contract/table_test.go
@@ -11,8 +11,8 @@ import (
"github.com/getkin/kin-openapi/openapi3filter"
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/transaction_test.go b/tests/contract/transaction_test.go
index abfebf5..97b1b34 100644
--- a/tests/contract/transaction_test.go
+++ b/tests/contract/transaction_test.go
@@ -10,8 +10,8 @@ import (
"testing"
"github.com/getkin/kin-openapi/openapi3filter"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/contract/view_test.go b/tests/contract/view_test.go
index e738c3a..a73634c 100644
--- a/tests/contract/view_test.go
+++ b/tests/contract/view_test.go
@@ -11,8 +11,8 @@ import (
"github.com/getkin/kin-openapi/openapi3filter"
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/e2e/events_test.go b/tests/e2e/events_test.go
index b83d381..e162206 100644
--- a/tests/e2e/events_test.go
+++ b/tests/e2e/events_test.go
@@ -10,7 +10,7 @@ import (
"testing"
"time"
- "github.com/kimuyb/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go
index b32ec0a..e23fe37 100644
--- a/tests/e2e/metrics_test.go
+++ b/tests/e2e/metrics_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"time"
- "github.com/kimuyb/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/e2e/resilience_test.go b/tests/e2e/resilience_test.go
index 380018f..06be5d7 100644
--- a/tests/e2e/resilience_test.go
+++ b/tests/e2e/resilience_test.go
@@ -6,7 +6,7 @@ import (
"net/http/httptest"
"testing"
- "github.com/kimuyb/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/e2e/shutdown_test.go b/tests/e2e/shutdown_test.go
index abdd9e5..c9340a6 100644
--- a/tests/e2e/shutdown_test.go
+++ b/tests/e2e/shutdown_test.go
@@ -8,8 +8,8 @@ import (
"testing"
"time"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/tests/helpers_test.go b/tests/helpers_test.go
index a52c424..3404fe9 100644
--- a/tests/helpers_test.go
+++ b/tests/helpers_test.go
@@ -14,9 +14,9 @@ import (
"time"
"github.com/gofiber/fiber/v2"
- "github.com/kimuyb/bingsan/internal/api"
- "github.com/kimuyb/bingsan/internal/config"
- "github.com/kimuyb/bingsan/internal/db"
+ "github.com/teamPaprika/bingsan/internal/api"
+ "github.com/teamPaprika/bingsan/internal/config"
+ "github.com/teamPaprika/bingsan/internal/db"
)
// TestServer wraps an API server for testing.
diff --git a/tests/integration/pool_integration_test.go b/tests/integration/pool_integration_test.go
index 3218ef5..e15d72b 100644
--- a/tests/integration/pool_integration_test.go
+++ b/tests/integration/pool_integration_test.go
@@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// TestPoolIntegration tests the pool package under realistic conditions.
diff --git a/tests/unit/pool_test.go b/tests/unit/pool_test.go
index ef323a3..3dd9e86 100644
--- a/tests/unit/pool_test.go
+++ b/tests/unit/pool_test.go
@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/kimuyb/bingsan/internal/pool"
+ "github.com/teamPaprika/bingsan/internal/pool"
)
// =============================================================================
diff --git a/tests/unit/updates_test.go b/tests/unit/updates_test.go
index 55638ed..07f697c 100644
--- a/tests/unit/updates_test.go
+++ b/tests/unit/updates_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/kimuyb/bingsan/internal/api/handlers/updates"
+ "github.com/teamPaprika/bingsan/internal/api/handlers/updates"
)
// =============================================================================