diff --git a/.env.example b/.env.example index c2817bcdd..16603a039 100644 --- a/.env.example +++ b/.env.example @@ -43,4 +43,21 @@ VERSION=dev BUILD_ID=docker # Config source (file, env, consul, etc.) -CONFIG_SOURCE=file \ No newline at end of file +CONFIG_SOURCE=file + +# ============================================ +# CNPG REMOTE ACCESS (optional) +# ============================================ +# +# Publish the CNPG Postgres port on the docker host. +# - Default bind is loopback for safety (not reachable from LAN). +# - To connect from a workstation, set CNPG_PUBLIC_BIND=0.0.0.0 (or a specific LAN IP) +# and ensure your firewall allows inbound traffic on CNPG_PUBLIC_PORT. +# - These variables must be present in `.env` (or exported) when running `docker compose`. +# +# CNPG_PUBLIC_BIND=127.0.0.1 +# CNPG_PUBLIC_PORT=5455 +# +# If workstation clients connect by IP with CNPG_SSL_MODE=verify-full, ensure the CNPG +# server certificate includes the docker host IP in its SAN. +# CNPG_CERT_EXTRA_IPS=192.168.2.134 diff --git a/.gitignore b/.gitignore index e03d87155..bd8bf1f6c 100644 --- a/.gitignore +++ b/.gitignore @@ -198,6 +198,12 @@ serviceradar-web_* *.gz # Ko build system for Go + +# Elixir/Phoenix (web-ng/) +deps/ +_build/ +.elixir_ls/ +erl_crash.dump .kodata # Generated by Cargo diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 000000000..5ff2deecd --- /dev/null +++ b/.tool-versions @@ -0,0 +1,2 @@ +erlang 28.3 +elixir 1.19.4 diff --git a/AGENTS.md b/AGENTS.md index 66beaa043..222401ecd 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -34,8 +34,15 @@ ServiceRadar is a multi-component system made up of Go services (core, sync, reg - `k8s/demo/` – Demo cluster manifests (faker, core, sync, CNPG, etc.). - `docker/`, `docker/images/` – Container builds and push targets. - `web/` – Next.js UI and API routes. +- `web-ng/` – Phoenix (next-gen) UI/API monolith. - `proto/` – Protobuf definitions and generated Go code. +## Per-Directory Agent Guides + +This file applies repo-wide, but subdirectories may include their own `AGENTS.md` with more specific rules; always read and follow the closest one to the code you are editing. + +- `web-ng/AGENTS.md` – Phoenix/Elixir/LiveView/Ecto/HEEx guidelines (must follow for any `web-ng/**` changes). + ## Build & Test Commands - General Go lint/test: `make lint`, `make test`. @@ -80,6 +87,45 @@ Reference `docs/docs/agents.md` for: faker deployment details, CNPG truncate/res - Restart the stack: `APP_TAG=sha- docker compose up -d --force-recreate`. - Verify: `docker compose ps` (one-shot jobs like cert-generator/config-updater exit once finished; nginx may sit in "health: starting" briefly). +## Web-NG Remote Dev (CNPG) + +Use this playbook to run `web-ng/` on a workstation while connecting to the existing CNPG instance running on the docker host (example: `192.168.2.134`). + +### 1. Publish CNPG on the docker host + +- By default, CNPG is bound to loopback only. To allow LAN access, set these in the docker host `.env` (or export them before running compose): + - `CNPG_PUBLIC_BIND=0.0.0.0` (or a specific LAN interface IP) + - `CNPG_PUBLIC_PORT=5455` + +### 2. 
Ensure CNPG TLS cert supports IP-based clients (verify-full) + +- If clients will connect by IP with `CNPG_SSL_MODE=verify-full`, add the host IP to the CNPG server cert SAN: + - `CNPG_CERT_EXTRA_IPS=192.168.2.134` + - Regenerate certs: `CNPG_CERT_EXTRA_IPS=192.168.2.134 docker compose up cert-generator` + - Restart CNPG (and ensure bind env vars are applied): `CNPG_PUBLIC_BIND=0.0.0.0 CNPG_PUBLIC_PORT=5455 docker compose up -d --force-recreate cnpg` + +### 3. Copy workstation client certs (keep out of git) + +- Determine the cert volume name: `docker volume ls | rg 'cert-data'` +- Copy out these files from the volume to a private directory on your workstation: + - `root.pem` + - `workstation.pem` + - `workstation-key.pem` + +### 4. Run Phoenix from your workstation + +```bash +cd web-ng +export CNPG_HOST=192.168.2.134 +export CNPG_PORT=5455 +export CNPG_DATABASE=serviceradar +export CNPG_USERNAME=serviceradar +export CNPG_PASSWORD=serviceradar +export CNPG_SSL_MODE=verify-full +export CNPG_CERT_DIR=/path/to/private/serviceradar-certs +mix phx.server +``` + ## Edge Onboarding Testing with Docker mTLS Stack Use this playbook to test edge onboarding functionality (e.g., sysmon checker mTLS bootstrap) against the Docker Compose mTLS stack. diff --git a/Cargo.lock b/Cargo.lock index 70acc28d5..dc569856b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -959,7 +959,7 @@ dependencies = [ "serde 1.0.228", "serde_json 1.0.145", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.17", "tokio", "toml 0.8.23", "tracing", @@ -1517,7 +1517,7 @@ dependencies = [ "serde 1.0.228", "serde_json 1.0.145", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.17", "tracing", "ureq", "urlencoding", @@ -2734,7 +2734,7 @@ dependencies = [ "serde 1.0.228", "serde_json 1.0.145", "spiffe", - "thiserror 1.0.69", + "thiserror 2.0.17", "tokio", "tokio-stream", "toml 0.8.23", @@ -5961,7 +5961,7 @@ dependencies = [ "serde_json 1.0.145", "serde_with", "serial_test", - "thiserror 1.0.69", + "thiserror 2.0.17", "tokio", "tokio-postgres", "tokio-postgres-rustls", diff --git a/cmd/core/app/app.go b/cmd/core/app/app.go index 79fc0530e..bda45273d 100644 --- a/cmd/core/app/app.go +++ b/cmd/core/app/app.go @@ -30,13 +30,8 @@ var ( // Options contains runtime configuration derived from CLI flags. type Options struct { - ConfigPath string - BackfillEnabled bool - BackfillDryRun bool - BackfillSeedKV bool - BackfillIPs bool - BackfillNamespace string - DisableWatch bool + ConfigPath string + DisableWatch bool } // Run boots the core service using the provided options. @@ -129,15 +124,6 @@ func Run(ctx context.Context, opts Options) error { return err } - if opts.BackfillEnabled { - backfillOpts := core.BackfillOptions{ - DryRun: opts.BackfillDryRun, - SeedKVOnly: opts.BackfillSeedKV, - Namespace: opts.BackfillNamespace, - } - return runBackfill(ctx, server, mainLogger, backfillOpts, opts.BackfillIPs) - } - apiOptions := bootstrap.BuildAPIServerOptions(&cfg, mainLogger, spireAdminClient) requireDeviceRegistry := cfg.Features.RequireDeviceRegistry != nil && *cfg.Features.RequireDeviceRegistry @@ -218,38 +204,3 @@ func Run(ctx context.Context, opts Options) error { }, }) } - -func runBackfill(ctx context.Context, server *core.Server, mainLogger logger.Logger, opts core.BackfillOptions, includeIPs bool) error { - startMsg := "Starting identity backfill (Armis/NetBox) ..." - if opts.DryRun { - startMsg = "Starting identity backfill (Armis/NetBox) in DRY-RUN mode ..." 
- } - mainLogger.Info().Msg(startMsg) - - if err := core.BackfillIdentityTombstones(ctx, server.DB, server.IdentityKVClient(), mainLogger, opts); err != nil { - return err - } - - if includeIPs { - ipMsg := "Starting IP alias backfill ..." - if opts.DryRun { - ipMsg = "Starting IP alias backfill (DRY-RUN) ..." - } else if opts.SeedKVOnly { - ipMsg = "Starting IP alias backfill (KV seeding only) ..." - } - mainLogger.Info().Msg(ipMsg) - - if err := core.BackfillIPAliasTombstones(ctx, server.DB, server.IdentityKVClient(), mainLogger, opts); err != nil { - return err - } - } - - completionMsg := "Backfill completed. Exiting." - if opts.DryRun { - completionMsg = "Backfill DRY-RUN completed. Exiting." - } else if opts.SeedKVOnly { - completionMsg = "Backfill KV seeding completed. Exiting." - } - mainLogger.Info().Msg(completionMsg) - return nil -} diff --git a/cmd/core/main.go b/cmd/core/main.go index 597a4d55f..8c83ec28b 100644 --- a/cmd/core/main.go +++ b/cmd/core/main.go @@ -55,31 +55,6 @@ import ( "github.com/carverauto/serviceradar/cmd/core/app" ) -type coreFlags struct { - ConfigPath string - Backfill bool - BackfillDryRun bool - BackfillSeedKV bool - BackfillIPs bool -} - -func parseFlags() coreFlags { - configPath := flag.String("config", "/etc/serviceradar/core.json", "Path to core config file") - backfill := flag.Bool("backfill-identities", false, "Run one-time identity backfill (Armis/NetBox) and exit") - backfillDryRun := flag.Bool("backfill-dry-run", false, "If set with --backfill-identities, only log actions without writing") - backfillSeedKV := flag.Bool("seed-kv-only", false, "Seed canonical identity map without emitting tombstones") - backfillIPs := flag.Bool("backfill-ips", true, "Also backfill sweep-only device IDs by IP aliasing into canonical identities") - flag.Parse() - - return coreFlags{ - ConfigPath: *configPath, - Backfill: *backfill, - BackfillDryRun: *backfillDryRun, - BackfillSeedKV: *backfillSeedKV, - BackfillIPs: *backfillIPs, - } -} - func main() { if err := run(); err != nil { log.Fatalf("Fatal error: %v", err) @@ -87,16 +62,13 @@ func main() { } func run() error { - opts := parseFlags() + configPath := flag.String("config", "/etc/serviceradar/core.json", "Path to core config file") + flag.Parse() + watchEnabled := parseEnvBool("CONFIG_WATCH_ENABLED", true) appOptions := app.Options{ - ConfigPath: opts.ConfigPath, - BackfillEnabled: opts.Backfill, - BackfillDryRun: opts.BackfillDryRun, - BackfillSeedKV: opts.BackfillSeedKV, - BackfillIPs: opts.BackfillIPs, - BackfillNamespace: "", - DisableWatch: !watchEnabled, + ConfigPath: *configPath, + DisableWatch: !watchEnabled, } return app.Run(context.Background(), appOptions) diff --git a/cmd/tools/kv-sweep/BUILD.bazel b/cmd/tools/kv-sweep/BUILD.bazel deleted file mode 100644 index 847abc556..000000000 --- a/cmd/tools/kv-sweep/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "kv_sweep_lib", - srcs = ["main.go"], - importpath = "github.com/carverauto/serviceradar/cmd/tools/kv-sweep", - visibility = ["//visibility:private"], - deps = [ - "//alias:config-bootstrap", - "//alias:identitymap", - "//alias:logger", - "//alias:models", - "//proto", - "//proto/identitymap/v1:identity_map", - "@com_github_nats_io_nats_go//:nats_go", - "@org_golang_google_grpc//:grpc", - ], -) - -go_binary( - name = "kv-sweep", - embed = [":kv_sweep_lib"], - visibility = ["//visibility:public"], -) diff --git a/cmd/tools/kv-sweep/main.go 
b/cmd/tools/kv-sweep/main.go deleted file mode 100644 index f245eedea..000000000 --- a/cmd/tools/kv-sweep/main.go +++ /dev/null @@ -1,592 +0,0 @@ -package main - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "flag" - "fmt" - "io/fs" - "log" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/nats-io/nats.go" - "google.golang.org/grpc" - - "github.com/carverauto/serviceradar/pkg/config/bootstrap" - "github.com/carverauto/serviceradar/pkg/identitymap" - "github.com/carverauto/serviceradar/pkg/logger" - "github.com/carverauto/serviceradar/pkg/models" - "github.com/carverauto/serviceradar/proto" - identitymappb "github.com/carverauto/serviceradar/proto/identitymap/v1" -) - -type sweepConfig struct { - natsURL string - natsUser string - natsPass string - natsCreds string - natsNKey string - natsTLSCert string - natsTLSKey string - natsTLSCA string - natsInsecureTLS bool - jsDomain string - bucket string - prefix string - maxKeys int - deleteCorrupt bool - dryRun bool - reportPath string - dumpDir string - rehydrate bool - coreAddress string - coreRole string - timeout time.Duration -} - -type corruptRecord struct { - Key string `json:"key"` - Revision uint64 `json:"revision"` - Error string `json:"error"` - DumpedPayload string `json:"dump_path,omitempty"` -} - -type sweepStats struct { - totalKeys int - filteredKeys int - validRecords int - corruptRecords int - deleted int - deleteFailures int - rehydrated int - rehydrateFail int - startedAt time.Time -} - -var ( - errBucketRequired = errors.New("bucket is required") - errRehydrateCoreAddress = errors.New("rehydrate requested but CORE_ADDRESS not provided") - errCoreAddressNotConfigured = errors.New("CORE_ADDRESS not configured") - errInvalidKeyPath = errors.New("invalid key path") - errUnknownIdentitySegment = errors.New("unknown identity kind segment") - errInvalidHexEscape = errors.New("invalid hex escape length in sanitized segment") - errUnsafeASCIICharacter = errors.New("decoded character out of safe ASCII range") -) - -func main() { - cfg := parseFlags() - if err := run(cfg); err != nil { - log.Fatalf("kv-sweep failed: %v", err) - } -} - -func parseFlags() sweepConfig { - var cfg sweepConfig - flag.StringVar(&cfg.natsURL, "nats-url", getenvDefault("NATS_URL", "nats://serviceradar-nats:4222"), "NATS server URL") - flag.StringVar(&cfg.natsUser, "nats-user", os.Getenv("NATS_USER"), "NATS username") - flag.StringVar(&cfg.natsPass, "nats-pass", os.Getenv("NATS_PASSWORD"), "NATS password") - flag.StringVar(&cfg.natsCreds, "nats-creds", os.Getenv("NATS_CREDS"), "path to NATS creds file") - flag.StringVar(&cfg.natsNKey, "nats-nkey", os.Getenv("NATS_NKEY"), "path to NATS NKey seed file") - flag.StringVar(&cfg.natsTLSCert, "nats-tls-cert", os.Getenv("NATS_TLS_CERT"), "path to TLS client certificate") - flag.StringVar(&cfg.natsTLSKey, "nats-tls-key", os.Getenv("NATS_TLS_KEY"), "path to TLS client key") - flag.StringVar(&cfg.natsTLSCA, "nats-tls-ca", os.Getenv("NATS_CA"), "path to TLS CA bundle") - flag.BoolVar(&cfg.natsInsecureTLS, "nats-tls-insecure", false, "skip TLS verification (development only)") - flag.StringVar(&cfg.jsDomain, "js-domain", os.Getenv("NATS_JS_DOMAIN"), "JetStream domain (optional)") - - flag.StringVar(&cfg.bucket, "bucket", getenvDefault("KV_BUCKET", "serviceradar-datasvc"), "KV bucket to scan") - flag.StringVar(&cfg.prefix, "prefix", identitymap.DefaultNamespace+"/", "key prefix to filter (default: canonical map)") - flag.IntVar(&cfg.maxKeys, 
"max-keys", 0, "limit number of keys to inspect (0 = no limit)") - flag.BoolVar(&cfg.deleteCorrupt, "delete", false, "delete corrupt entries after recording them") - flag.BoolVar(&cfg.dryRun, "dry-run", false, "log planned actions without performing mutations") - flag.StringVar(&cfg.reportPath, "report", "", "optional path to write JSON report of corrupt keys") - flag.StringVar(&cfg.dumpDir, "dump-dir", "", "directory to dump raw payloads for corrupt entries") - flag.DurationVar(&cfg.timeout, "timeout", 5*time.Second, "per-key operation timeout") - - flag.BoolVar(&cfg.rehydrate, "rehydrate", false, "call CoreService.GetCanonicalDevice after deleting corrupt entries") - flag.StringVar(&cfg.coreAddress, "core-address", os.Getenv("CORE_ADDRESS"), "core gRPC address (overrides CORE_ADDRESS env when set)") - flag.StringVar(&cfg.coreRole, "core-role", string(models.RoleAgent), "service role identity to present to core when rehydrating") - - flag.Parse() - - return cfg -} - -func run(cfg sweepConfig) error { - if err := validateSweepConfig(cfg); err != nil { - return err - } - if err := ensureCoreAddress(cfg); err != nil { - return err - } - - nc, err := connectNATS(cfg) - if err != nil { - return fmt.Errorf("connect to NATS: %w", err) - } - defer drainNATS(nc) - - jsOpts := []nats.JSOpt{} - if cfg.jsDomain != "" { - jsOpts = append(jsOpts, nats.Domain(cfg.jsDomain)) - } - js, err := nc.JetStream(jsOpts...) - if err != nil { - return fmt.Errorf("init JetStream: %w", err) - } - - kv, err := js.KeyValue(cfg.bucket) - if err != nil { - return fmt.Errorf("open bucket %q: %w", cfg.bucket, err) - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - stats := sweepStats{startedAt: time.Now()} - var problems []corruptRecord - - var coreClient proto.CoreServiceClient - var coreCleanup func() - if cfg.rehydrate && !cfg.dryRun { - coreClient, coreCleanup, err = connectCore(ctx, cfg) - if err != nil { - return fmt.Errorf("connect to core: %w", err) - } - defer coreCleanup() - } - - if cfg.dumpDir != "" { - if err := os.MkdirAll(cfg.dumpDir, 0o755); err != nil { - return fmt.Errorf("create dump dir: %w", err) - } - } - - if err := scanBucket(ctx, kv, cfg, &stats, &problems, coreClient); err != nil { - return err - } - - if cfg.reportPath != "" { - report := map[string]any{ - "bucket": cfg.bucket, - "prefix": cfg.prefix, - "stats": stats, - "corrupt": problems, - "generated_at": time.Now().UTC().Format(time.RFC3339), - } - if err := writeJSON(cfg.reportPath, report); err != nil { - return fmt.Errorf("write report: %w", err) - } - } - - log.Printf("Scanned %d keys (prefix match %d); valid=%d corrupt=%d deleted=%d (%d failures) rehydrated=%d (%d failures) in %s", - stats.totalKeys, - stats.filteredKeys, - stats.validRecords, - stats.corruptRecords, - stats.deleted, - stats.deleteFailures, - stats.rehydrated, - stats.rehydrateFail, - time.Since(stats.startedAt).Round(time.Millisecond), - ) - - if cfg.dryRun && cfg.deleteCorrupt { - log.Println("Dry-run mode was enabled; no keys were deleted.") - } - - if len(problems) > 0 && cfg.reportPath == "" { - output, _ := json.MarshalIndent(problems, "", " ") - fmt.Printf("%s\n", output) - } - - return nil -} - -func validateSweepConfig(cfg sweepConfig) error { - if cfg.bucket == "" { - return errBucketRequired - } - return nil -} - -func ensureCoreAddress(cfg sweepConfig) error { - if !cfg.rehydrate { - return nil - } - - if cfg.coreAddress != "" { - if err := os.Setenv("CORE_ADDRESS", cfg.coreAddress); err != nil { - return fmt.Errorf("set 
CORE_ADDRESS: %w", err) - } - return nil - } - - if os.Getenv("CORE_ADDRESS") == "" { - return errRehydrateCoreAddress - } - - return nil -} - -func scanBucket( - ctx context.Context, - kv nats.KeyValue, - cfg sweepConfig, - stats *sweepStats, - problems *[]corruptRecord, - coreClient proto.CoreServiceClient, -) error { - lister, err := kv.ListKeys() - if err != nil { - return fmt.Errorf("list keys: %w", err) - } - defer stopLister(lister) - - for key := range lister.Keys() { - stats.totalKeys++ - if cfg.prefix != "" && !strings.HasPrefix(key, cfg.prefix) { - continue - } - - stats.filteredKeys++ - if cfg.maxKeys > 0 && stats.filteredKeys > cfg.maxKeys { - break - } - - if err := processKey(ctx, kv, key, cfg, stats, problems, coreClient); err != nil { - log.Printf("WARN: %v", err) - } - } - - return nil -} - -func processKey( - ctx context.Context, - kv nats.KeyValue, - key string, - cfg sweepConfig, - stats *sweepStats, - problems *[]corruptRecord, - coreClient proto.CoreServiceClient, -) error { - entry, err := kv.Get(key) - if err != nil { - return fmt.Errorf("failed to get key %s: %w", key, err) - } - - value := entry.Value() - if _, err := identitymap.UnmarshalRecord(value); err != nil { - handleCorruptEntry(ctx, cfg, kv, key, entry, value, stats, problems, coreClient, err) - return nil - } - - stats.validRecords++ - return nil -} - -func handleCorruptEntry( - ctx context.Context, - cfg sweepConfig, - kv nats.KeyValue, - key string, - entry nats.KeyValueEntry, - value []byte, - stats *sweepStats, - problems *[]corruptRecord, - coreClient proto.CoreServiceClient, - parseErr error, -) { - stats.corruptRecords++ - rec := corruptRecord{ - Key: key, - Revision: entry.Revision(), - Error: parseErr.Error(), - } - - if cfg.dumpDir != "" { - if path, dumpErr := dumpPayload(cfg.dumpDir, key, value); dumpErr != nil { - log.Printf("WARN: failed to dump payload for %s: %v", key, dumpErr) - } else { - rec.DumpedPayload = path - } - } - - if cfg.deleteCorrupt { - if cfg.dryRun { - log.Printf("[dry-run] would delete %s (rev=%d)", key, entry.Revision()) - } else if err := kv.Delete(key); err != nil { - stats.deleteFailures++ - log.Printf("ERROR: failed to delete %s: %v", key, err) - } else { - stats.deleted++ - log.Printf("Deleted %s (rev=%d)", key, entry.Revision()) - } - } - - if cfg.rehydrate && !cfg.dryRun && coreClient != nil { - if err := requestRehydrate(ctx, coreClient, key); err != nil { - stats.rehydrateFail++ - log.Printf("WARN: rehydrate request for %s failed: %v", key, err) - } else { - stats.rehydrated++ - } - } - - *problems = append(*problems, rec) -} - -func drainNATS(nc *nats.Conn) { - if nc == nil { - return - } - - if err := nc.Drain(); err != nil { - log.Printf("WARN: failed to drain NATS connection: %v", err) - } -} - -func stopLister(lister nats.KeyLister) { - if lister == nil { - return - } - if err := lister.Stop(); err != nil { - log.Printf("WARN: failed to stop key lister: %v", err) - } -} - -func connectNATS(cfg sweepConfig) (*nats.Conn, error) { - opts := []nats.Option{ - nats.Name("serviceradar-kv-sweep"), - nats.Timeout(10 * time.Second), - } - - if cfg.natsUser != "" { - opts = append(opts, nats.UserInfo(cfg.natsUser, cfg.natsPass)) - } - if cfg.natsCreds != "" { - opts = append(opts, nats.UserCredentials(cfg.natsCreds)) - } - if cfg.natsNKey != "" { - opt, err := nats.NkeyOptionFromSeed(cfg.natsNKey) - if err != nil { - return nil, fmt.Errorf("load NATS nkey seed: %w", err) - } - opts = append(opts, opt) - } - - tlsConfig, err := buildTLSConfig(cfg) - if err != nil { - 
return nil, err - } - if tlsConfig != nil { - opts = append(opts, nats.Secure(tlsConfig)) - } - - return nats.Connect(cfg.natsURL, opts...) -} - -func buildTLSConfig(cfg sweepConfig) (*tls.Config, error) { - if cfg.natsTLSCert == "" && cfg.natsTLSKey == "" && cfg.natsTLSCA == "" && !cfg.natsInsecureTLS { - return nil, nil - } - - tlsConfig := &tls.Config{ - MinVersion: tls.VersionTLS12, - } - if cfg.natsInsecureTLS { - tlsConfig.InsecureSkipVerify = true - } - - if cfg.natsTLSCA != "" { - caCert, err := os.ReadFile(cfg.natsTLSCA) - if err != nil { - return nil, fmt.Errorf("read NATS CA file: %w", err) - } - cp := x509.NewCertPool() - cp.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = cp - } - - if cfg.natsTLSCert != "" && cfg.natsTLSKey != "" { - cert, err := tls.LoadX509KeyPair(cfg.natsTLSCert, cfg.natsTLSKey) - if err != nil { - return nil, fmt.Errorf("load client certificate: %w", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - return tlsConfig, nil -} - -func dumpPayload(dir, key string, value []byte) (string, error) { - safeName := strings.NewReplacer("/", "_", ":", "_").Replace(key) - target := filepath.Join(dir, safeName+".b64") - encoded := base64.StdEncoding.EncodeToString(value) - if err := os.WriteFile(target, []byte(encoded+"\n"), fs.FileMode(0o644)); err != nil { - return "", err - } - return target, nil -} - -func writeJSON(path string, value any) error { - data, err := json.MarshalIndent(value, "", " ") - if err != nil { - return err - } - return os.WriteFile(path, data, 0o644) -} - -func getenvDefault(key, fallback string) string { - if val := os.Getenv(key); val != "" { - return val - } - return fallback -} - -func connectCore(ctx context.Context, cfg sweepConfig) (proto.CoreServiceClient, func(), error) { - role := models.ServiceRole(cfg.coreRole) - dialOpts, closeProvider, err := bootstrap.BuildCoreDialOptionsFromEnv(ctx, role, logger.NewTestLogger()) - if err != nil { - return nil, nil, err - } - - address := cfg.coreAddress - if address == "" { - address = os.Getenv("CORE_ADDRESS") - } - if address == "" { - return nil, nil, errCoreAddressNotConfigured - } - - conn, err := grpc.NewClient(address, dialOpts...) 
- if err != nil { - closeProvider() - return nil, nil, err - } - - cleanup := func() { - _ = conn.Close() - closeProvider() - } - return proto.NewCoreServiceClient(conn), cleanup, nil -} - -func requestRehydrate(ctx context.Context, client proto.CoreServiceClient, keyPath string) error { - key, err := keyFromPath(keyPath) - if err != nil { - return err - } - - req := &proto.GetCanonicalDeviceRequest{ - Namespace: identitymap.DefaultNamespace, - IdentityKeys: []*identitymappb.IdentityKey{ - { - Kind: key.Kind, - Value: key.Value, - }, - }, - } - - _, err = client.GetCanonicalDevice(ctx, req) - return err -} - -func keyFromPath(path string) (identitymap.Key, error) { - trimmed := strings.Trim(path, "/") - parts := strings.Split(trimmed, "/") - if len(parts) < 3 { - return identitymap.Key{}, fmt.Errorf("%w: %s", errInvalidKeyPath, path) - } - - kind, err := kindFromSegment(parts[1]) - if err != nil { - return identitymap.Key{}, err - } - - value, err := unsanitizeSegment(strings.Join(parts[2:], "/")) - if err != nil { - return identitymap.Key{}, fmt.Errorf("decode key %s: %w", path, err) - } - - return identitymap.Key{ - Kind: kind, - Value: value, - }, nil -} - -func kindFromSegment(seg string) (identitymap.Kind, error) { - switch seg { - case "device-id": - return identitymap.KindDeviceID, nil - case "armis-id": - return identitymap.KindArmisID, nil - case "netbox-id": - return identitymap.KindNetboxID, nil - case "mac": - return identitymap.KindMAC, nil - case "ip": - return identitymap.KindIP, nil - case "partition-ip": - return identitymap.KindPartitionIP, nil - default: - return identitymap.KindUnspecified, fmt.Errorf("%w: %s", errUnknownIdentitySegment, seg) - } -} - -func unsanitizeSegment(seg string) (string, error) { - if !strings.Contains(seg, "=") { - return seg, nil - } - - var b strings.Builder - b.Grow(len(seg)) - - for i := 0; i < len(seg); i++ { - ch := seg[i] - if ch != '=' { - b.WriteByte(ch) - continue - } - - j := i + 1 - for j < len(seg) && isHexDigit(seg[j]) { - j++ - } - - if j == i+1 { - // Lone '='; leave as-is. 
- b.WriteByte('=') - continue - } - - hexRun := seg[i+1 : j] - if len(hexRun)%2 != 0 { - return "", fmt.Errorf("%w after '=' in %q", errInvalidHexEscape, seg) - } - - val, err := strconv.ParseInt(hexRun, 16, 32) - if err != nil { - return "", err - } - if val < 0x20 || val > 0x7E { - return "", fmt.Errorf("%w in %q", errUnsafeASCIICharacter, seg) - } - b.WriteByte(byte(val)) - i = j - 1 - } - - return b.String(), nil -} - -func isHexDigit(b byte) bool { - return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F') -} diff --git a/docker-compose.yml b/docker-compose.yml index 92813de3b..266cf4d02 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,6 +4,10 @@ services: cert-generator: image: alpine:3.20 container_name: serviceradar-cert-generator-mtls + environment: + # Optional extra SAN entries for `cnpg.pem` so clients can connect using a LAN IP + # (example: CNPG_CERT_EXTRA_IPS=192.168.2.134) + - CNPG_CERT_EXTRA_IPS=${CNPG_CERT_EXTRA_IPS:-} volumes: - cert-data:/certs - ./docker/compose/generate-certs.sh:/generate-certs.sh:ro @@ -36,6 +40,8 @@ services: - POSTGRES_PASSWORD=${CNPG_PASSWORD:-serviceradar} - POSTGRES_DB=${CNPG_DATABASE:-serviceradar} - TS_TUNE=false + ports: + - "${CNPG_PUBLIC_BIND:-127.0.0.1}:${CNPG_PUBLIC_PORT:-5455}:5432" volumes: - cnpg-data:/var/lib/postgresql/data - ./docker/compose/cnpg-init.sql:/docker-entrypoint-initdb.d/001-init.sql:ro @@ -364,39 +370,6 @@ services: - poller.serviceradar restart: unless-stopped - srql: - image: ghcr.io/carverauto/serviceradar-srql:${APP_TAG:-v1.0.67} - container_name: serviceradar-srql-mtls - environment: - - AUTH_ENABLED=true - - SRQL_API_KEY=${SRQL_API_KEY:-} - - SRQL_LISTEN_HOST=0.0.0.0 - - SRQL_LISTEN_PORT=8080 - - CNPG_HOST=${CNPG_HOST:-cnpg} - - CNPG_PORT=${CNPG_PORT:-5432} - - CNPG_DATABASE=${CNPG_DATABASE:-serviceradar} - - CNPG_USERNAME=${CNPG_USERNAME:-serviceradar} - - CNPG_PASSWORD=${CNPG_PASSWORD:-serviceradar} - - CNPG_SSLMODE=${CNPG_SSL_MODE:-verify-full} - - CNPG_CERT_DIR=/etc/serviceradar/certs - - SRQL_DATABASE_URL=${SRQL_DATABASE_URL:-} - volumes: - - cert-data:/etc/serviceradar/certs:ro - - generated-config:/etc/serviceradar/config:ro - - credentials:/etc/serviceradar/credentials:ro - depends_on: - cnpg: - condition: service_healthy - healthcheck: - test: ["CMD-SHELL", "bash -c '/dev/null || true; fi; until [ -f /opt/kong/kong.yml ]; do echo '[kong] waiting for /opt/kong/kong.yml'; sleep 2; done; rm -f /usr/local/kong/pids/nginx.pid /opt/kong/pids/nginx.pid 2>/dev/null || true; exec kong start --vv"] - volumes: - - kong_config:/opt/kong - - ./docker/kong/kong.yaml:/default-kong.yml:ro - ports: - - "8000:8000" - - "8001:8001" - networks: - - serviceradar-net - healthcheck: - test: ["CMD", "kong", "health"] - interval: 10s - timeout: 5s - retries: 12 - start_period: 20s - - web: - image: ghcr.io/carverauto/serviceradar-web:${APP_TAG:-v1.0.67} - container_name: serviceradar-web-mtls + web-ng: + build: + context: . 
+ dockerfile: docker/compose/Dockerfile.web-ng + container_name: serviceradar-web-ng-mtls environment: - - NODE_ENV=production - - NEXT_PUBLIC_API_URL=${PUBLIC_API_URL:-http://localhost/api} - - NEXT_INTERNAL_API_URL=http://core:8090 - - NEXT_INTERNAL_SRQL_URL=http://kong:8000 - - AUTH_ENABLED=true + - PORT=4000 + - PHX_SERVER=true + - SECRET_KEY_BASE=${WEB_NG_SECRET_KEY_BASE:-H8DPohD5rFUqGboVqCKLYXrlyofYUJk6k+XBzKEb5G8LN9brhYpNloE3UgxBQmPW} + - CNPG_HOST=${CNPG_HOST:-cnpg} + - CNPG_PORT=${CNPG_PORT:-5432} + - CNPG_DATABASE=${CNPG_DATABASE:-serviceradar} + - CNPG_USERNAME=${CNPG_USERNAME:-serviceradar} + - CNPG_PASSWORD=${CNPG_PASSWORD:-serviceradar} + - CNPG_SSL_MODE=${CNPG_SSL_MODE:-verify-full} + - CNPG_TLS_SERVER_NAME=${CNPG_TLS_SERVER_NAME:-cnpg} + - CNPG_CERT_DIR=/etc/serviceradar/certs volumes: - cert-data:/etc/serviceradar/certs:ro - - ./docker/compose/web.docker.json:/etc/serviceradar/web.json:ro - - generated-config:/etc/serviceradar/config:ro - - credentials:/etc/serviceradar/credentials:ro depends_on: - config-updater: + cert-permissions-fixer: condition: service_completed_successfully - core: - condition: service_started + cnpg: + condition: service_healthy healthcheck: - test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://0.0.0.0:3000/"] + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://0.0.0.0:4000/"] interval: 30s timeout: 10s retries: 3 @@ -803,18 +717,13 @@ services: - "${SERVICERADAR_HTTP_PORT:-80}:80" - "${SERVICERADAR_HTTPS_PORT:-443}:443" environment: - API_UPSTREAM: ${API_UPSTREAM:-http://kong:8000} DNS_RESOLVER: "127.0.0.11" # Docker's internal DNS volumes: - cert-data:/etc/serviceradar/certs:ro - ./docker/compose/nginx.conf.template:/etc/nginx/templates/default.conf.template:ro - ./docker/compose/entrypoint-nginx.sh:/docker-entrypoint.d/50-serviceradar.sh:ro depends_on: - web: - condition: service_started - core: - condition: service_started - kong: + web-ng: condition: service_started healthcheck: test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1/"] @@ -851,7 +760,6 @@ volumes: mapper-data: snmp-checker-data: rperf-client-data: - kong_config: networks: serviceradar-net: diff --git a/docker/compose/Dockerfile.web-ng b/docker/compose/Dockerfile.web-ng new file mode 100644 index 000000000..ab3a725cf --- /dev/null +++ b/docker/compose/Dockerfile.web-ng @@ -0,0 +1,71 @@ +FROM hexpm/elixir:1.19.4-erlang-28.3-debian-bookworm-20251208-slim AS build + +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + curl \ + git \ + pkg-config \ + protobuf-compiler \ + libssl-dev \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal +ENV PATH="/root/.cargo/bin:${PATH}" + +WORKDIR /app + +COPY web-ng/mix.exs web-ng/mix.lock ./web-ng/ +COPY web-ng/config ./web-ng/config +COPY web-ng/lib ./web-ng/lib +COPY web-ng/priv ./web-ng/priv +COPY web-ng/assets ./web-ng/assets +COPY web-ng/native ./web-ng/native + +COPY proto ./proto + +COPY rust/srql ./rust/srql +COPY rust/kvutil ./rust/kvutil + +RUN mkdir -p /app/rust \ + && cat > /app/rust/Cargo.toml <<'EOF' +[workspace] +resolver = "2" +members = [ + "srql", + "kvutil", +] + +[workspace.dependencies] +tonic = { version = "0.12", features = ["tls"] } +prost = "0.13" +tonic-build = "0.12" +tokio = { version = "1" } +tokio-stream = "0.1" +EOF + +WORKDIR /app/web-ng + +ENV MIX_ENV=prod + +RUN mix local.hex --force && mix local.rebar --force +RUN mix deps.get --only prod 
+RUN mix deps.compile +RUN mix compile +RUN mix assets.deploy + +FROM hexpm/elixir:1.19.4-erlang-28.3-debian-bookworm-20251208-slim AS runtime + +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /app/web-ng + +ENV MIX_ENV=prod + +COPY --from=build /app/web-ng /app/web-ng + +EXPOSE 4000 + +CMD ["sh", "-lc", "mix ecto.migrate && exec mix phx.server"] diff --git a/docker/compose/entrypoint-nginx.sh b/docker/compose/entrypoint-nginx.sh index cd760ad37..10d214e19 100755 --- a/docker/compose/entrypoint-nginx.sh +++ b/docker/compose/entrypoint-nginx.sh @@ -17,19 +17,11 @@ set -e echo "[Nginx Init] Waiting for upstream services to be ready..." -# Wait for web service -if wait-for-port --host web --port 3000 --attempts 30 --interval 2s --quiet; then - echo "[Nginx Init] Web service is ready on port 3000" +# Wait for web-ng service +if wait-for-port --host web-ng --port 4000 --attempts 30 --interval 2s --quiet; then + echo "[Nginx Init] Web-NG service is ready on port 4000" else - echo "[Nginx Init] ERROR: Timed out waiting for web service on port 3000" >&2 - exit 1 -fi - -# Wait for core service -if wait-for-port --host core --port 8090 --attempts 30 --interval 2s --quiet; then - echo "[Nginx Init] Core service is ready on port 8090" -else - echo "[Nginx Init] ERROR: Timed out waiting for core service on port 8090" >&2 + echo "[Nginx Init] ERROR: Timed out waiting for web-ng service on port 4000" >&2 exit 1 fi diff --git a/docker/compose/generate-certs.sh b/docker/compose/generate-certs.sh index 59bbc519e..fe321a421 100755 --- a/docker/compose/generate-certs.sh +++ b/docker/compose/generate-certs.sh @@ -65,12 +65,24 @@ generate_cert() { local san=$3 if [ -f "$CERT_DIR/$component.pem" ]; then - echo "Certificate for $component already exists, ensuring permissions." - chmod 600 "$CERT_DIR/$component-key.pem" 2>/dev/null || true - if [ "$component" = "cnpg" ]; then - chown 26:999 "$CERT_DIR/$component-key.pem" "$CERT_DIR/$component.pem" 2>/dev/null || true + if [ "$component" = "cnpg" ] && [ -n "${CNPG_CERT_EXTRA_IPS:-}" ]; then + for ip in $(echo "$CNPG_CERT_EXTRA_IPS" | tr ',' ' '); do + if ! openssl x509 -in "$CERT_DIR/$component.pem" -noout -text | grep -q "IP Address:${ip}"; then + echo "CNPG certificate is missing SAN IP ${ip}; regenerating cnpg certificate..." + rm -f "$CERT_DIR/$component.pem" "$CERT_DIR/$component-key.pem" + break + fi + done + fi + + if [ -f "$CERT_DIR/$component.pem" ]; then + echo "Certificate for $component already exists, ensuring permissions." + chmod 600 "$CERT_DIR/$component-key.pem" 2>/dev/null || true + if [ "$component" = "cnpg" ]; then + chown 26:999 "$CERT_DIR/$component-key.pem" "$CERT_DIR/$component.pem" 2>/dev/null || true + fi + return fi - return fi echo "Generating certificate for $component..." 
@@ -147,7 +159,17 @@ generate_cert "agent" "agent.serviceradar" "DNS:agent,DNS:agent.serviceradar,DNS generate_cert "web" "web.serviceradar" "DNS:web,DNS:web.serviceradar,DNS:serviceradar-web,DNS:localhost,IP:127.0.0.1" generate_cert "db-event-writer" "db-event-writer.serviceradar" "DNS:db-event-writer,DNS:db-event-writer.serviceradar,DNS:serviceradar-db-event-writer,DNS:localhost,IP:127.0.0.1" generate_cert "srql" "srql.serviceradar" "DNS:srql,DNS:srql.serviceradar,DNS:serviceradar-srql,DNS:localhost,IP:127.0.0.1" -generate_cert "cnpg" "cnpg.serviceradar" "DNS:cnpg,DNS:cnpg-rw,DNS:cnpg.serviceradar,DNS:cnpg-rw.serviceradar,DNS:serviceradar-cnpg,DNS:localhost,IP:127.0.0.1" + +CNPG_SAN="DNS:cnpg,DNS:cnpg-rw,DNS:cnpg.serviceradar,DNS:cnpg-rw.serviceradar,DNS:serviceradar-cnpg,DNS:localhost,IP:127.0.0.1" +if [ -n "${CNPG_CERT_EXTRA_IPS:-}" ]; then + for ip in $(echo "$CNPG_CERT_EXTRA_IPS" | tr ',' ' '); do + CNPG_SAN="${CNPG_SAN},IP:${ip}" + done +fi +generate_cert "cnpg" "cnpg.serviceradar" "${CNPG_SAN}" + +# Client cert intended for developers connecting from outside the Docker network +generate_cert "workstation" "workstation.serviceradar" "DNS:workstation,DNS:workstation.serviceradar,DNS:localhost,IP:127.0.0.1" # Other services generate_cert "snmp-checker" "snmp-checker.serviceradar" "DNS:snmp-checker,DNS:snmp-checker.serviceradar,DNS:serviceradar-snmp-checker,DNS:agent.serviceradar,DNS:localhost,IP:127.0.0.1" diff --git a/docker/compose/nginx.conf.template b/docker/compose/nginx.conf.template index 5b17e3373..b133a6b95 100644 --- a/docker/compose/nginx.conf.template +++ b/docker/compose/nginx.conf.template @@ -1,4 +1,4 @@ -# ServiceRadar Web Interface - Nginx Configuration for Docker Compose +# ServiceRadar Web-NG Interface - Nginx Configuration for Docker Compose # DNS resolver for dynamic service discovery # Docker uses 127.0.0.11 by default (configurable via DNS_RESOLVER env) @@ -32,135 +32,18 @@ server { proxy_buffers 4 256k; proxy_busy_buffers_size 256k; - # Static assets from Next.js - location /_next/ { - # Use variable to force runtime DNS resolution - set $web_upstream http://web:3000; - proxy_pass $web_upstream; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } - - # WebSocket streaming endpoint - proxy to API upstream - location /api/stream { - # Use variable to force runtime DNS resolution - # API_UPSTREAM provided via container env; default set in compose - set $api_upstream $API_UPSTREAM; - proxy_pass $api_upstream; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Authorization $http_authorization; - proxy_set_header X-API-Key $http_x_api_key; - proxy_set_header Cookie $http_cookie; - - # WebSocket specific headers - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - - # Timeout settings for WebSocket connections - proxy_read_timeout 86400; - proxy_send_timeout 86400; - proxy_connect_timeout 60; - - # Keep-alive and buffering settings for streaming - proxy_buffering off; - proxy_cache off; - } + # Use variable to force runtime DNS resolution + set $web_upstream http://web-ng:4000; - # API routes handled by Next.js (auth, nodes, status, pollers, admin tooling, SRQL proxy) - location ~ 
^/api/(auth|nodes|status|pollers|devices|mcp|config|admin|kv|services|query) { - # Use variable to force runtime DNS resolution - set $web_upstream http://web:3000; + # Phoenix LiveView WebSocket endpoint + location /live/websocket { proxy_pass $web_upstream; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; - } - - # Swagger UI routes - location /swagger/ { - # Use variable to force runtime DNS resolution - set $core_upstream http://core:8090; - proxy_pass $core_upstream; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Authorization $http_authorization; - proxy_pass_header Authorization; - proxy_set_header X-API-Key $http_x_api_key; - - # Allow preflight requests - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'Content-Type, Authorization, X-API-Key'; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - - proxy_read_timeout 300; - proxy_connect_timeout 300; - } - - # API docs redirect - location = /api-docs { - # Use variable to force runtime DNS resolution - set $core_upstream http://core:8090; - proxy_pass $core_upstream; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Authorization $http_authorization; - proxy_set_header X-API-Key $http_x_api_key; - } - - # Fix double slashes in API URLs (frontend bug workaround) - location = /api//query { - # Use variable to force runtime DNS resolution - set $api_upstream $API_UPSTREAM; - proxy_pass $api_upstream/api/query$is_args$args; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Authorization $http_authorization; - proxy_set_header X-API-Key $http_x_api_key; proxy_set_header Cookie $http_cookie; - - # WebSocket support - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_cache_bypass $http_upgrade; - proxy_read_timeout 86400; - } - # Backend API routes (catch-all) -> API upstream - location /api/ { - # Use variable to force runtime DNS resolution - set $api_upstream $API_UPSTREAM; - proxy_pass $api_upstream; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Authorization $http_authorization; - proxy_set_header X-API-Key $http_x_api_key; - - # IMPORTANT: Forward cookies to backend for authentication - proxy_set_header Cookie $http_cookie; - - # WebSocket support for /api/stream proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; @@ -168,28 +51,13 @@ server { proxy_read_timeout 86400; } - # Auth API routes - location /auth/ { - # Use variable to force runtime DNS resolution - set $core_upstream http://core:8090; - proxy_pass $core_upstream; - proxy_set_header Host 
$host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Cookie $http_cookie; - add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always; - add_header Access-Control-Allow-Headers "Content-Type, Authorization, X-API-Key" always; - } - # Main app location / { - # Use variable to force runtime DNS resolution - set $web_upstream http://web:3000; proxy_pass $web_upstream; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Cookie $http_cookie; } } diff --git a/docs/docs/architecture.md b/docs/docs/architecture.md index eec5dd5ee..b63703e22 100644 --- a/docs/docs/architecture.md +++ b/docs/docs/architecture.md @@ -200,81 +200,15 @@ To fix this, the Device Registry now picks a canonical identity per real-world d - **Sweep normalization**: Any sweep-only alias (`partition:ip`) is merged into the canonical record so Poller results land on the device the UI already knows about. - **Metadata hints**: `_merged_into` markers are written on non-canonical rows so downstream consumers can recognise historical merges. -JetStream key/value buckets disallow characters such as `:` in key segments, so the canonical map encodes each identity value using an `=` escape sequence for any disallowed rune (for example, the MAC `AA:BB` is stored as `AA=3ABB`). Clients call into the shared helper in `pkg/identitymap` before hitting the KV service, ensuring lookups and publishes stay consistent regardless of the original identifier format. +**Note:** KV is NOT used for device identity resolution. CNPG (PostgreSQL) is the authoritative source for identity via the `device_identifiers` table. The IdentityEngine in `pkg/registry` uses strong identifiers (Armis ID, MAC, etc.) to generate deterministic `sr:` UUIDs and stores mappings in CNPG with an in-memory cache for performance. -### Why the backfill exists +### Monitoring identity lookups -Before the canonicalization rules were introduced, the database already contained duplicate `device_id`s—some with long-running poller history. The new registry logic keeps things clean going forward, but we still need to reconcile the backlog so reporting and alerting stay accurate. The one-off backfill job walks the existing Timescale tables, identifies duplicate identities, and emits tombstone `DeviceUpdate` messages to fold the old IDs into their canonical equivalents. +The core lookup path emits OpenTelemetry metrics so operators can see how identity resolution behaves in real time: -Run the backfill from the `serviceradar-core` binary when you are ready to migrate historical data: +- `identitymap_lookup_latency_seconds` (labels: `resolved_via=db|miss|error`, `found=true|false`) measures end-to-end latency for resolving canonical devices via CNPG. -```bash -serviceradar-core --config /etc/serviceradar/core.json --backfill-identities -``` - -Key CLI flags: - -- `--backfill-identities` runs the identity de-duplication and exits without starting gRPC/HTTP services. -- `--backfill-ips` (default `true`) also merges sweep-generated aliases that only differ by IP. -- `--backfill-dry-run` prints what would merge without publishing tombstones—use this on staging first to validate cardinality. -- `--seed-kv-only` seeds the canonical map in NATS KV without emitting tombstones. 
Pair this with `--backfill-dry-run` during staged rollouts so you can warm caches before mutating historic device rows. - -### Monitoring the canonical identity map - -The registry and backfill jobs now emit OpenTelemetry metrics so operators can see how the identity map behaves in real time: - -- `identitymap_kv_publish_total` (labels: `outcome=created|updated|unchanged|dry_run`) counts the number of KV writes attempted for identity keys. -- `identitymap_conflict_total` (labels: `reason=aborted|already_exists|retry_exhausted`) tracks CAS contention or retries that exceeded their budget. -- `identitymap_lookup_latency_seconds` (labels: `resolved_via=kv|db|miss|error`, `found=true|false`) measures end-to-end latency for resolving canonical devices. - -Conflicts are also logged with the key path and gRPC status code whenever JetStream rejects an optimistic update. Feed these metrics into the OTEL collector (`cmd/otel`) to populate the Prometheus dashboards used during rollout. - -### Exporting canonical identity metrics - -1. Enable OTEL metrics in the core configuration. The same block that controls OTEL logging now wires the metric exporter: - - ```jsonc - { - "logging": { - "level": "info", - "otel": { - "enabled": true, - "endpoint": "otel-collector.default.svc.cluster.local:4317", - "insecure": true - } - } - } - ``` - - The endpoint should point at the OTLP gRPC listener exposed by `cmd/otel` (or any compatible collector). - -2. Update the OTEL collector to expose a Prometheus scrape endpoint. The stock `cmd/otel/otel.toml` now includes: - - ```toml - [server.metrics] - bind_address = "0.0.0.0" - port = 9464 - ``` - - With this block in place the collector serves the aggregated counters at `http://:9464/metrics`. - -3. Add the new time series to Grafana or Alertmanager. Common queries include: - - - `rate(identitymap_kv_publish_total{outcome="created"}[5m])` – confirms new canonical entries are still flowing. - - `identitymap_conflict_total{reason="retry_exhausted"}` – fires when CAS contention needs investigation. - - `histogram_quantile(0.95, rate(identitymap_lookup_latency_seconds_bucket[5m]))` – watches the p95 lookup latency across the fleet. - -4. During feature rollout, chart the metrics alongside the backfill jobs. Pair the Prometheus dashboard with the staging commands in the next section to verify seeding runs beforehand. - -### Rollout checklist - -1. **Staging seed:** run `serviceradar-core --config /etc/serviceradar/core.json --backfill-identities --seed-kv-only --backfill-dry-run` to pre-populate NATS KV without mutating history. Watch `identitymap_kv_publish_total{outcome="dry_run"}` to confirm keys are enumerated. -2. **Validate signals:** scrape `identitymap_lookup_latency_seconds` and `identitymap_conflict_total` for at least one sweep interval. Conflicts should stay at zero and keep lookup latency below the alert threshold (p95 under 250 ms). -3. **Commit the backfill:** rerun the job without `--backfill-dry-run` (and optionally with `--seed-kv-only=false`) to emit the tombstones and fold historical rows. -4. **Flip the feature flag:** deploy the updated core configuration so the registry publishes canonical IDs by default (keeping the legacy tombstone path as a safety net). Repeat the same sequence in production once staging metrics hold steady. -5. 
**Post-rollout watch:** leave the Prometheus alerts in place for at least one week; any sustained rise in `identitymap_conflict_total{reason="retry_exhausted"}` should trigger an incident to investigate duplicate identifiers. - -When the backfill finishes it logs the totals and exits. After that, the Device Registry enforces the same canonicalization rules for all future DeviceUpdate events flowing from Armis, KV sweeps, and Poller results. +Feed these metrics into the OTEL collector (`cmd/otel`) to populate Prometheus dashboards. ## Security Architecture diff --git a/openspec/AGENTS.md b/openspec/AGENTS.md index a46219849..154545c85 100644 --- a/openspec/AGENTS.md +++ b/openspec/AGENTS.md @@ -446,6 +446,78 @@ helm push serviceradar-1.0.71-pre2.tgz oci://ghcr.io/carverauto/charts rm serviceradar-1.0.71-pre2.tgz ``` +## Development Environment + +### Docker Compose Stack + +The `docker-compose.yml` in the project root provides the full development stack. Key services: +- **cnpg** - PostgreSQL with TimescaleDB (port 5455) +- **core** - ServiceRadar core service (ports 8090, 9090, 50052) +- **nats** - NATS messaging (ports 4222, 6222, 8222) +- **datasvc** - Data service (port 50057) + +### Starting the Stack + +```bash +# Start all services +docker compose up -d + +# Check service health +docker compose ps +docker logs serviceradar-core-mtls --tail 50 +``` + +### Database Access + +By default, CNPG binds to `127.0.0.1:5455` for security. For remote access during development: + +```bash +# Allow external connections (for remote dev) +CNPG_PUBLIC_BIND=0.0.0.0 docker compose up -d cnpg + +# Or restart the whole stack with external DB access +CNPG_PUBLIC_BIND=0.0.0.0 docker compose up -d +``` + +### Elixir Web App (web-ng) + +The Phoenix LiveView app in `web-ng/` connects to CNPG. To run locally: + +```bash +cd web-ng + +# Connect to local docker stack +CNPG_HOST=localhost CNPG_PORT=5455 mix phx.server + +# Connect to remote docker host +CNPG_HOST=192.168.2.134 CNPG_PORT=5455 mix phx.server +``` + +Access the app at http://localhost:4000 + +### Environment Variables + +Common development overrides: +| Variable | Default | Description | +|----------|---------|-------------| +| `CNPG_PUBLIC_BIND` | `127.0.0.1` | Database bind address | +| `CNPG_PUBLIC_PORT` | `5455` | Database external port | +| `CNPG_HOST` | `cnpg` (in docker) | Database hostname | +| `CNPG_PORT` | `5432` | Database port | +| `CNPG_USERNAME` | `serviceradar` | Database user | +| `CNPG_PASSWORD` | `serviceradar` | Database password | +| `APP_TAG` | `v1.0.67` | Container image tag | + +### Rebuilding After Code Changes + +```bash +# Rebuild specific service with git SHA tag (for dev) +APP_TAG=sha-$(git rev-parse --short HEAD) docker compose up -d --build core + +# Or pull latest from registry +APP_TAG=v1.0.78 docker compose pull && APP_TAG=v1.0.78 docker compose up -d +``` + ## Best Practices ### Simplicity First diff --git a/openspec/changes/add-age-relationship-graph/design.md b/openspec/changes/add-age-relationship-graph/design.md deleted file mode 100644 index c808ed732..000000000 --- a/openspec/changes/add-age-relationship-graph/design.md +++ /dev/null @@ -1,29 +0,0 @@ -## Context -Apache AGE is already bundled in our CNPG images but unused. Checker hosts still leak into inventory as phantom devices (e.g., sysmon/mapper/zen health probes showing up as `agent` devices). 
We need a first-class relationship graph (devices, collectors, services/checkers, interfaces, capabilities) so the inventory can distinguish collector-owned services from monitored targets and show topology/metrics badges. - -## Goals / Non-Goals -- Goals: bootstrap an AGE graph in CNPG, define node/edge schema, ingest registry/mapper/checker data, ingest DIRE outputs, expose graph queries for inventory/SRQL/AI, provide rebuild/backfill and drift detection. -- Non-Goals: replace relational tables for registry/history, redesign SRQL planner wholesale, or add new external stores beyond AGE inside CNPG. - -## Decisions -- Graph: create `serviceradar` AGE graph; enable `age` extension for core/SRQL connections. -- Nodes: `Device` (canonical_device_id), `Collector` (serviceradar:agent|poller), `Service` (internal + external, keyed by service device ID), `Interface` (device/name), optional `Capability` nodes for SNMP/OTEL/sysmon/healthcheck. -- Edges: `HOSTS_SERVICE` (Collector → Service), `RUNS_CHECKER` (Collector → Service or CheckerDefinition), `TARGETS` (Service/Checker → Device), `HAS_INTERFACE`/`ON_DEVICE` (Device ↔ Interface), `CONNECTS_TO` (Interface ↔ Interface), `PROVIDES_CAPABILITY` (Service/Device ↔ Capability), `REPORTED_BY` (Service/Device → Collector for provenance). -- Ingestion: DIRE emits canonical device updates into AGE; core registry emits Cypher `MERGE` writes; mapper interface pipeline writes Interfaces and neighbor CONNECTS_TO edges via DIRE-resolved device IDs; checker ingestion writes RUNS_CHECKER/TARGETS without promoting collector host IPs to Device nodes; writers tolerate AGE failures (queue/retry) without blocking registry. -- Queries: DAO/API returns device neighborhoods (collector → service/checker → target → interfaces + capabilities) with flags for collector-owned services; SRQL gains graph-backed primitives for inventory/topology; provide stored Cypher templates for UI and AI use. -- Backfill: job to rehydrate graph from unified_devices (DIRE), service registry, mapper interface inventory, and recent checker history. -- SRQL integration: add a dedicated graph entity `device_graph` (`in:device_graph`) that issues AGE `cypher(...)` and casts results to `jsonb` to avoid `agtype` bindings; normalize graph_path/search_path per session for safety; return structured JSON (device, collectors, services/checkers with collector-owned flag, targets, interfaces, capability badges) instead of raw Cypher rows; extend SRQL test harness to seed the AGE graph with a minimal neighborhood and assert the contract. - -## Risks / Trade-offs -- AGE ingestion lag could desync UI badges from registry state → mitigate with retry + drift metrics and rebuild job. -- Graph growth from checker history → limit to current edges (latest per target/service) and keep history in relational tables. -- Multi-tenant concerns if we later split graphs per customer → start with namespaced IDs to allow future partitioning. - -## Migration Plan -- Add migration to create graph + indexes; deploy writer paths behind a feature flag. -- Run backfill job once deployed; monitor drift metrics; adjust retention limits for checker→target edges if needed. - -## Open Questions -- Should capability badges live as dedicated nodes or edge properties for simpler queries? -- Do we need per-checker execution edges (history) or only latest per service/target? (lean latest + relational history) -- Should SRQL expose graph traversal (AGE cypher) directly or only through curated APIs? 
diff --git a/openspec/changes/add-age-relationship-graph/proposal.md b/openspec/changes/add-age-relationship-graph/proposal.md deleted file mode 100644 index c1e7cc26f..000000000 --- a/openspec/changes/add-age-relationship-graph/proposal.md +++ /dev/null @@ -1,25 +0,0 @@ -# Change: Apache AGE Relationship Graph for Devices and Services - -## Why -Checker hosts are still leaking into inventory as phantom devices (e.g., sysmon/mapper/zen health probes appearing as `agent` devices) even after the fix-checker-device-identity rollout. Mapper discoveries and DIRE outputs are not represented as relationships, so interfaces and neighbors cannot be navigated cleanly. We need an explicit relationship model to keep collector-owned services/checkers attached to their agents, surface SNMP targets with their interfaces, drive relationship-aware inventory badges/topology views, and give the UI/AI clear labels instead of misclassified devices. Apache AGE is already bundled in our CNPG images; we should start using it to persist the device/service/collector graph. - -## What Changes -- Bootstrap an `age` graph (`serviceradar`) in CNPG with node/edge types for devices, interfaces, services (internal + target), collectors, and checker definitions, plus indexes on canonical IDs. -- Ingest registry updates (agents/pollers/services), mapper discoveries, checker results, and sync/DIRE device updates into AGE, merging on canonical IDs and ensuring collector host metadata does NOT create new device nodes. -- Store relationships for health checks and metrics capabilities (e.g., SNMP/OTEL/sysmon) so inventory can label nodes like "sync (collector service)" or show that a router has SNMP metrics available. -- Expose graph query surfaces for the UI/API/SRQL to fetch a device’s neighborhood (collector → service/checker → target → interfaces) and render badges instead of duplicating devices, including a dedicated SRQL entity `in:device_graph`. -- Add a rebuild/backfill job to regenerate the graph from relational tables (unified_devices, registry, mapper) and emit observability around graph drift or ingestion failures. -- Keep the Device Inventory table flat while using graph relationships for badges/filters; show relationships in device detail/graph canvases and a Network Discovery/Interfaces view without polluting the device list with interfaces. -- Make the graph the source for AI/device reasoning so answers use canonical IDs/relationships instead of flat unified_devices queries. - -## Impact -- Affected specs: `device-relationship-graph` (new), `cnpg` (AGE graph initialization), `device-identity-reconciliation` (collector-vs-target handling), `web` inventory visualization -- Affected code: CNPG migrations/bootstrap, core registry ingestion, mapper interface pipeline, checker ingestion, DIRE output fanout to AGE, SRQL/graph queries, web inventory APIs/components, AI query wiring - -## Scope updates (2025-12-01) -- Drop the hierarchy-first table in the main Device Inventory; return to the flat inventory view and surface relationships via badges plus detail/graph views (hierarchy stays out of the table for now). -- Network neighborhood canvas: large sweep/Armis/NetBox imports (50k+ nodes) require an alternative visualization or aggregation strategy; leaving this as an open item. 
- -## Scope updates (2025-12-03) -- Replace the ReactFlow neighborhood canvas with a D3 hierarchy/cluster-based dendrogram that auto-lays out collectors → services/checkers → targets/interfaces so edges always render and the graph stays compact on the device detail page. -- Add a large-neighborhood fallback that clusters sweep/Armis-scale results (e.g., 50k+ devices) by CIDR using a pack layout instead of drawing every edge to avoid blowing up the device detail graph. diff --git a/openspec/changes/add-age-relationship-graph/specs/device-relationship-graph/spec.md b/openspec/changes/add-age-relationship-graph/specs/device-relationship-graph/spec.md deleted file mode 100644 index 967d267a7..000000000 --- a/openspec/changes/add-age-relationship-graph/specs/device-relationship-graph/spec.md +++ /dev/null @@ -1,148 +0,0 @@ -## ADDED Requirements -### Requirement: AGE graph is bootstrapped in CNPG -The system SHALL create and maintain an Apache AGE graph named `serviceradar` in the CNPG database with the AGE extension enabled for all core/SRQL connections. - -#### Scenario: Graph created at startup -- **WHEN** CNPG schema migrations run on a fresh cluster -- **THEN** `CREATE EXTENSION IF NOT EXISTS age; SELECT create_graph('serviceradar');` completes without error and the graph is available for subsequent queries. - -#### Scenario: Idempotent graph readiness -- **WHEN** migrations or bootstrap jobs rerun on an existing cluster -- **THEN** the graph creation step is a no-op and AGE remains usable without dropping data. - -### Requirement: Canonical nodes for devices, services, and collectors -The system SHALL model devices, services, and collectors as graph nodes keyed by their canonical IDs so they merge instead of duplicating when IPs or hostnames change. - -#### Scenario: Device node merge by canonical ID -- **WHEN** a device update arrives for `canonical_device_id=sr:70c5563c-592f-458a-ab46-cb635fb01e3d` with a new IP -- **THEN** the AGE graph issues `MERGE (d:Device {id: 'sr:70c...'}) SET d.ip = '...'` and does not create a second Device node. - -#### Scenario: Service node for internal component -- **WHEN** the sync service reports status with device ID `serviceradar:sync:sync` from agent `docker-agent` -- **THEN** the graph `MERGE`s a `Service {id: 'serviceradar:sync:sync', type: 'sync'}` node and links it to `Collector {id: 'serviceradar:agent:docker-agent'}` without creating a new Device node named `agent`. - -#### Scenario: Collector identity from registry -- **WHEN** an agent or poller reconnects with a different pod IP -- **THEN** the graph retains a single `Collector` node keyed by `serviceradar:agent:`/`serviceradar:poller:` and updates the IP property instead of duplicating the collector. - -### Requirement: Checker relationships attach to collectors, not devices -The system SHALL represent checker definitions and executions as relationships from collectors and services to their targets, and SHALL NOT create device nodes for collector host metadata. - -#### Scenario: Collector health check does not create a device -- **WHEN** a checker result reports `checker_service=sync`, `checker_host_ip=172.18.0.5`, `collector_agent_id=docker-agent` -- **THEN** the graph records `(:Collector {id:'serviceradar:agent:docker-agent'})-[:RUNS_CHECKER {service:'sync'}]->(:Service {id:'serviceradar:sync:sync'})` and no Device node is created for `172.18.0.5`. 
- -#### Scenario: Remote sysmon target becomes the device node -- **WHEN** a sysmon checker running on `docker-agent` polls target `192.168.1.218` -- **THEN** the graph links `Collector docker-agent` via `RUNS_CHECKER` to a `Service` node (sysmon) with a `TARGETS` edge to `Device {ip:'192.168.1.218'}`, and the collector IP is not promoted to a Device node. - -### Requirement: Mapper-discovered interfaces are modeled as first-class nodes -The system SHALL store network interfaces discovered by mapper as Interface nodes linked to their devices and peer interfaces when topology is known. - -#### Scenario: Interface attached to device -- **WHEN** mapper reports interface `eth0` on device `serviceradar:router:edge1` with MAC `aa:bb:cc:dd:ee:ff` -- **THEN** the graph `MERGE`s `(:Interface {id:'serviceradar:router:edge1/eth0', mac:'aa:bb:cc:dd:ee:ff'})-[:ON_DEVICE]->(:Device {id:'serviceradar:router:edge1'})`. - -#### Scenario: Interface peer linkage -- **WHEN** mapper reports that `edge1/eth0` connects to `switch1/gi0/1` -- **THEN** the graph adds a `CONNECTS_TO` edge between the two Interface nodes to capture topology. - -### Requirement: Capabilities and metrics availability are encoded in the graph -The system SHALL encode monitoring capabilities (e.g., SNMP, OTEL, sysmon) as graph relationships so the UI can render badges and filter results. - -#### Scenario: SNMP metrics badge for router -- **WHEN** SNMP collection succeeds for target `192.168.1.1` -- **THEN** the graph links the target Device to a `Capability {type:'snmp'}` node (or edge property) so the inventory shows an SNMP metrics indicator for that device. - -#### Scenario: Internal service health capability -- **WHEN** the sync service health check is ingested -- **THEN** the corresponding Service node is marked with `capabilities:['healthcheck']` (or a `PROVIDES_CAPABILITY` edge) so the UI can label it as a collector-owned health check rather than a standalone device. - -### Requirement: Graph neighborhood query for inventory -The system SHALL expose an API/DAO to return a device’s immediate neighborhood (collector, services/checkers, interfaces, capabilities) for rendering in the inventory view. - -#### Scenario: Inventory fetch distinguishes collector-owned services -- **WHEN** the UI requests the neighborhood for `serviceradar:sync:sync` -- **THEN** the API returns the Collector → Service relation with a flag indicating it is collector-owned, enabling the UI to label it instead of listing it as a device. - -#### Scenario: Device neighborhood includes topology and collectors -- **WHEN** the UI requests the neighborhood for a router device -- **THEN** the API returns attached Interfaces, any CONNECTS_TO peers, and the Collectors/Services that target the device so operators can trace how the device is monitored. - -### Requirement: SRQL exposes graph-backed neighborhood queries -The system SHALL provide a SRQL query path that reads from the AGE graph and returns structured JSON for device neighborhoods instead of relying on relational joins. - -#### Scenario: SRQL graph query returns structured JSON -- **WHEN** a SRQL query requests a device neighborhood via the dedicated graph entity `in:device_graph` -- **THEN** the service executes an AGE `cypher` against graph_path `serviceradar`, casts the result to JSON containing the device, collectors, services/checkers (with a collector-owned flag), targets, interfaces, and capability badges, and does not query `unified_devices` for relationships. 
- -#### Scenario: SRQL graph harness seeds AGE data -- **WHEN** SRQL integration tests run -- **THEN** the harness bootstraps the AGE graph (graph_path, labels) and seeds a minimal neighborhood so the graph query contract is validated without `agtype` binding errors. - -### Requirement: Graph rebuild and drift detection -The system SHALL provide a job to rebuild the AGE graph from relational sources and emit drift metrics/alerts when graph ingestion fails or diverges. - -#### Scenario: Rebuild restores graph after failure -- **WHEN** the rebuild job runs against an empty AGE graph -- **THEN** it rehydrates Devices, Services, Collectors, Interfaces, and their edges from canonical tables without reintroducing phantom collector devices. - -#### Scenario: Drift alert on ingestion failures -- **WHEN** AGE writes fail for more than a configurable threshold (e.g., 5 minutes) -- **THEN** metrics/logs indicate graph drift and operators are alerted to rerun the rebuild job. - -### Requirement: DIRE feeds the graph as the source of canonical device identity -The system SHALL emit DIRE-resolved device records into the AGE graph so the graph remains aligned with `unified_devices` and continues to apply collector-vs-target rules. - -#### Scenario: DIRE update merges Device node -- **WHEN** DIRE resolves a device update for `canonical_device_id=sr:50487279-694c-44be-9ef3-40e1fe1eea57` -- **THEN** the graph `MERGE`s the Device node by that ID and updates properties (IP/hostname) without creating a new node, keeping parity with `unified_devices`. - -#### Scenario: Collector host IP from checker is ignored -- **WHEN** DIRE receives a checker-sourced sighting whose host IP matches the collector `docker-agent` -- **THEN** no new Device node is created in AGE for that IP; the relationship is kept between the collector and the service/checker node only. - -### Requirement: Mapper seeds and neighbors align to canonical devices -The system SHALL route mapper discoveries (seed targets, interfaces, neighbor devices) through DIRE and into the AGE graph so interfaces and neighbors attach to the correct canonical devices. - -#### Scenario: Mapper seed promotes to device and interfaces -- **WHEN** a mapper job starts with seed `192.168.1.1` -- **THEN** DIRE promotes the seed into a canonical Device node; mapper-discovered interfaces on that seed attach to that Device in the graph. - -#### Scenario: Neighbor discovery creates new device with interfaces -- **WHEN** mapper learns neighbor `192.168.10.1` via LLDP/CDP from the seed -- **THEN** the graph `MERGE`s a Device for the neighbor via DIRE canonical ID and attaches any discovered interfaces to that neighbor Device, with `CONNECTS_TO` edges between peer interfaces. - -### Requirement: Inventory queries read from the graph, not only relational tables -The system SHALL serve device inventory and neighborhood queries from AGE (via DAO/SRQL) so UI/AI surfaces use graph relationships instead of flat `unified_devices` joins. - -#### Scenario: Device Inventory uses graph neighborhood -- **WHEN** the UI requests the Device Inventory row for a device -- **THEN** the API fetches the device’s neighborhood from AGE (collector → services/checkers → capabilities) and renders badges/children without querying `unified_devices` directly. 
- -#### Scenario: SRQL query sources topology from AGE -- **WHEN** SRQL is asked for a device’s connected collectors or services -- **THEN** it reads from the AGE graph to return relationships, ensuring poller/agent ownership and service health checks appear as children rather than duplicate devices. - -### Requirement: Flat inventory layout uses graph context and link-outs -The system SHALL keep the main Device Inventory table flat while using AGE relationships for badges/filters, and rely on dedicated graph/detail plus discovery views to navigate hierarchy and interfaces. - -#### Scenario: Flat inventory row per device -- **WHEN** viewing the Device Inventory -- **THEN** each device renders as a single row with graph-sourced badges/toggles for collector-owned services/checkers and capabilities, without nested or indented rows. - -#### Scenario: Interfaces page shows devices with discovered interfaces only -- **WHEN** opening the Network → Discovery → Interfaces page -- **THEN** only devices that have mapper-discovered interfaces are listed; expanding a device shows its interfaces and peer links, and interfaces are not shown as top-level devices elsewhere. - -#### Scenario: Poller-to-agent relationships reachable via detail views -- **WHEN** a poller has registered agents -- **THEN** the flat inventory provides link-outs or actions to the device detail/graph view to see poller↔agent/service relationships while keeping the main table un-nested. - -#### Scenario: Device detail renders a graph canvas -- **WHEN** viewing a device with collectors, services, targets, and interfaces -- **THEN** the detail page renders a D3 hierarchy/cluster (dendrogram) view that auto-lays out the device, its collectors/child agents, services/checkers, targets, and interfaces on a single canvas with visible edges, badges/links back to the underlying nodes, and interfaces shown as attached nodes instead of top-level devices. - -#### Scenario: Large neighborhoods collapse into CIDR clusters -- **WHEN** the neighborhood contains a sweep/Armis/NetBox-scale result set (tens of thousands of targets or more) -- **THEN** the detail view switches to a pack layout that clusters targets by CIDR/prefix to avoid rendering tens of thousands of individual edges while keeping collectors/services recognizable and navigable. diff --git a/openspec/changes/add-age-relationship-graph/tasks.md b/openspec/changes/add-age-relationship-graph/tasks.md deleted file mode 100644 index 72bf1ddbf..000000000 --- a/openspec/changes/add-age-relationship-graph/tasks.md +++ /dev/null @@ -1,68 +0,0 @@ -## 1. Bootstrap AGE graph in CNPG - -- [x] 1.1 Add migration/init script to `create_graph('serviceradar')` and enable AGE search_path for core/SRQL connections -- [x] 1.2 Define indexes/constraints for canonical IDs on Device, Service, Collector, Interface nodes and ensure idempotent creation -- [x] 1.3 Add ops/runbook steps for Docker Compose + demo k8s to verify AGE graph readiness -- [x] 1.4 Add AGE write credentials/config to DIRE/SRQL/core so they can emit Cypher writes - -## 2. 
Graph schema and contracts - -- [x] 2.1 Define node labels/properties for Device, Interface, Service (internal vs target), Collector (agent/poller), CheckerDefinition, Capability (snmp/otel/sysmon) -- [x] 2.2 Define edge types/properties (HOSTS_SERVICE, RUNS_CHECKER, TARGETS, HAS_INTERFACE, CONNECTS_TO, PROVIDES_CAPABILITY, REPORTED_BY) -- [x] 2.3 Document canonical ID format mapping (unified_device.canonical_device_id → Device.id, service_device_id → Service.id, agent/poller ids → Collector.id, interface fingerprint) -- [x] 2.4 Add AGE schema docs to `docs/docs/` (or `openspec/design.md`) for reference -- [x] 2.5 Map DIRE resolution outputs to graph nodes/edges so the graph stays aligned with unified_devices - -## 3. Ingestion pipelines into AGE - -- [x] 3.1 Wire core registry/device updates (via DIRE output) to `MERGE` Device + Capability edges (SNMP/OTEL/sysmon) without creating collector-host devices -- [x] 3.2 Emit Service nodes/edges for internal services (datasvc/sync/mapper/otel/zen) attached to their collector nodes instead of standalone devices -- [x] 3.3 Map checker results to TARGETS/RUNS_CHECKER edges from Service/CheckerDefinition → Device targets; ensure collector host metadata is ignored for node creation -- [x] 3.4 Ingest mapper interface discoveries as Interface nodes attached to Devices; add CONNECTS_TO edges between interfaces when topology is known; seed devices discovered via mapper through DIRE to reuse canonical IDs -- [x] 3.5 Provide backpressure/error handling so AGE failures do not block registry ingestion, with metrics + logs -- [x] 3.6 Add reconciliation/backfill from unified_devices + mapper discoveries to heal graph drift - -## 4. API surfaces and queries - -- [x] 4.1 Add API endpoint/DAO to fetch a device neighborhood (collector → service/checker → target → interfaces) from AGE -- [x] 4.2 Add filters to return only collector-owned services/checkers vs external targets -- [x] 4.3 Provide Cypher snippets or stored procedures for common queries (device summary, path to collector, service capability badges) -- [x] 4.4 Add a new SRQL entity `in:device_graph` that reads from AGE for inventory-like queries (neighborhood/relationships) instead of only `unified_devices`, returning structured JSON (collector-owned flags, capabilities, interfaces) from AGE Cypher rather than raw agtype -- [x] 4.5 Expose graph queries for AI copilots so responses draw from canonical relationships -- [x] 4.6 Add SRQL AGE bootstrap + fixtures in tests (graph_path, labels, sample neighborhood) and integration tests that validate the graph query contract and JSON shape - -## 5. 
Web inventory integration - -- [x] 5.1 Update inventory UI to show graph-derived badges (e.g., "collector service", "SNMP metrics") instead of duplicating devices -- [x] 5.2 Ensure entries like sync/mapper/zen health checks render as services on `docker-agent`/poller, not as separate devices named "agent" -- [x] 5.3 Add UI affordances to hide/filter collector-owned health checks while keeping true targets visible -- [x] 5.4 Restore the Device Inventory to the original flat table while keeping graph-sourced badges/filters, and keep the Network Discovery/Interfaces view device-scoped (interfaces grouped under a device, no nested inventory rows) -- [x] 5.5 Add a graph-based device detail view (ReactFlow) showing collectors, services, targets, and interfaces in a single neighborhood canvas with badges/links -- [x] 5.6 Remove hierarchy-first/auto-expanded poller rows from the Device Inventory while keeping poller/agent/service relationships reachable via graph-based detail views or link-outs; preserve pagination/performance for large inventories (50k–5M devices) -- [x] 5.7 Add clear link-outs from the flat inventory rows to the graph canvas/network discovery views so relationships remain discoverable without nested table rows -- [x] 5.8 Replace the ReactFlow neighborhood canvas with a D3 hierarchy/cluster-based dendrogram that auto-lays out collectors → services/checkers → targets/interfaces so edges stay visible and the graph remains compact on device detail pages -- [x] 5.9 Add a large-neighborhood fallback that clusters sweep-scale (10k–50k+) targets by CIDR using a pack layout instead of rendering all edges/vertices in the device detail graph - -## 6. Backfill, testing, and validation - -- [x] 6.1 Add backfill/rebuild job to regenerate AGE graph from relational sources (device updates, mapper interfaces, checker history) -- [x] 6.2 Add unit/integration tests for graph ingestion and neighborhood queries (including collector-vs-target distinction) -- [x] 6.3 Document validation steps: run on docker-compose.mtls and confirm phantom checker devices do not reappear; verify SNMP target shows metrics badge -- [ ] 6.4 Validate mapper-seeded devices (seed router + neighbors) create correct Device/Interface/CONNECTS_TO relationships in the graph -- [ ] 6.5 Validate SRQL/graph queries back the UI without unified_devices joins - ---- - -Progress notes: -- Compose mTLS stack rebuilt on APP_TAG `sha-3c7fc58993090562980d9fa62aab7caeb4c8db19`; search_path corrected to `public, ag_catalog`, CNPG collation refreshed, and AGE cypher() params in the graph writer converted to stringified JSON for compatibility. -- Core/poller/agent/web running; data now lands in `public` (unified_devices=10, pollers=1, logs/traces populated). UI validation + AGE backfill still outstanding. -- AGE graph writer cypher calls reworked to use parameter maps (no format() dollar quoting); compose refreshed with APP_TAG `sha-d03721f47c5c7b4575da2a3f00c475bcfa0b0237` and services healthy. -- AGE neighborhood helper updated to drop graph_path, pin search_path to `ag_catalog,pg_catalog`, cast agtype via text/jsonb, and aggregate property maps to avoid null graph responses; re-applied migration to mTLS CNPG and verified age_device_neighborhood returns collectors/services/targets. -- Inventory UI now hides collector-owned service devices by default, surfaces them as collector services with badges/toggle + SRQL link-outs, and defaults graph cards to collector-owned view for service nodes. 
-- Device inventory graph cards now render child collectors for poller roots; network discovery view groups interfaces under their owning devices (no interface rows as top-level results) with per-device interface tables. -- ReactFlow-based device detail view + graph-centric inventory view still pending; will anchor on `in:device_graph` neighborhood responses and include interfaces in the canvas. -- Dagre-based ReactFlow layout landed for device neighborhoods, hierarchy-first device table (now being rolled back to flat) updated, and mTLS compose rebuilt on APP_TAG `sha-a9a9af5644f28b3ba13be71264307f981b21fa0e` with images pushed to GHCR. -- Inventory table previously auto-expanded pollers by default, removed the AGE/SRQL banner in child rows, restored ICMP sparklines for ICMP-capable devices, and limited sysmon/SNMP indicators to supported devices to avoid stuck skeletons; collector-service info banner on the dashboard was removed (now being reverted to a flat table per 5.4/5.6). -- Discovery source badges now normalize object-shaped entries to avoid React crashes, and compose mTLS stack refreshed on the latest `sha-a9a9af5644f28b3ba13be71264307f981b21fa0e` build (web digest `f3cca016019225d25212aaa4f6ea1fdf2c6b495cfa5dd03f50f16d789c46b81c`). -- UI cleanup: Removed "Hierarchy view is hiding X rows" banner, Capability Status table, Metadata card, and Graph Relationships card from the main device inventory expanded row. DeviceDetail page restructured to use full-width sections, reduced Network Neighborhood canvas from 500px to 320px, and added click-to-focus behavior for canvas zoom/pan (scroll wheel no longer captured on hover—requires click first). -- Scope pivot: reverting the Device Inventory back to a flat layout (hierarchy stays in graph/detail views); relationship navigation will rely on badges/link-outs and the dedicated network discovery/graph canvases. diff --git a/openspec/changes/add-demo-staging-environment/proposal.md b/openspec/changes/add-demo-staging-environment/proposal.md deleted file mode 100644 index 9bb39ebf7..000000000 --- a/openspec/changes/add-demo-staging-environment/proposal.md +++ /dev/null @@ -1,26 +0,0 @@ -## Why -- We only ship one Kustomize overlay (`k8s/demo/prod`) for the public demo namespace and it is hard-coded to the `demo` namespace plus `demo.serviceradar.cloud` DNS/secret names, so there is no safe space to rehearse config or image updates before touching the real demo cluster. -- Platform teams asked for a mirror of the demo stack that lives in its own namespace (`demo-staging`) and exposes a new DNS record (`demo-staging.serviceradar.cloud`) so product, docs, and support can validate rollouts without impacting customers. -- Adding a parallel manifest tree has cross-cutting implications (deploy scripts, ingress annotations, service alias names, TLS secrets, documentation), so we need an approved OpenSpec change before cloning everything under `k8s/demo` into `k8s/demo-staging`. - -## What Changes -- Expand the existing `k8s/demo/staging/` overlay (plus helper scripts) so it mirrors the `k8s/demo/prod/` deployment but defaults every namespace label, hostname, TLS secret, and ExternalName reference to `demo-staging`. -- Update Kustomize overlays so `kustomize build k8s/demo/staging` emits the same components that `k8s/demo/prod` currently deploys, just scoped to the new namespace and DNS; keep the shared base under `k8s/demo/base` for all common resources. 
-- Extend deployment tooling and docs (`k8s/demo/README.md`, `k8s/demo/deploy.sh`, any runbooks in `docs/docs/agents.md`) to explain how to apply the new overlay, what DNS/cert-manager prerequisites exist for `demo-staging.serviceradar.cloud`, and how to run validations (e.g., `kubectl get ingress -n demo-staging`). -- Ensure ingress annotations and secrets line up with the new hostname (new TLS secret name, External DNS hostname annotation, Kong/web routing) and capture any DNS automation tasks that ops has to run before the manifests can converge. - -## Scope -### In Scope -- Copying or refactoring the manifests under `k8s/demo` so the `staging` overlay within that tree renders an equivalent set of workloads without changing container images or resource shapes (other than namespace/DNS). -- Namespace/DNS specific tweaks: namespace manifests, labels/annotations, `external-dns` hostname, TLS secret names, service-alias ExternalName targets, and any scripts that hard-code `demo` today. -- README/runbook updates to document how to bootstrap and validate the new environment. - -### Out of Scope -- Changing component configuration, scaling, or images beyond what is required to rename the namespace/hostname. -- Provisioning the actual DNS entries or certificates in the target cluster (the manifests should reference them, but infra provisioning stays manual/out-of-band). -- Rearchitecting the Kustomize layout; we will mirror the existing approach even if it duplicates files. - -## Impact -- We double the number of manifests we must keep in sync with the demo environment, so releases must include instructions for updating both directories. -- External-DNS/cert-manager will create additional records/secrets, and operators will need to monitor a second namespace worth of workloads. -- Deploy tooling gains a new environment selector, so CI/CD or manual scripts that assume `demo` may need minor updates when this work lands. diff --git a/openspec/changes/add-demo-staging-environment/specs/k8s-environments/spec.md b/openspec/changes/add-demo-staging-environment/specs/k8s-environments/spec.md deleted file mode 100644 index 733655e0b..000000000 --- a/openspec/changes/add-demo-staging-environment/specs/k8s-environments/spec.md +++ /dev/null @@ -1,34 +0,0 @@ -## ADDED Requirements -### Requirement: Demo-staging manifests mirror the demo deployment -ServiceRadar MUST ship a `k8s/demo/staging` overlay whose rendered output is configuration-identical to `k8s/demo/prod` except for the namespace/hostname values so engineers can rehearse demo changes without touching the live namespace. - -#### Scenario: Kustomize render targets the demo-staging namespace -- **GIVEN** the repository checkout contains the new manifest tree -- **WHEN** `kustomize build k8s/demo/staging` runs -- **THEN** the output includes every workload from `k8s/demo/prod` and all generated YAML documents declare `metadata.namespace: demo-staging` (or rely on `namespace: demo-staging` at the Kustomize level) instead of `demo`. - -#### Scenario: Service aliases follow demo-staging FQDNs -- **GIVEN** the new manifests are applied to a cluster -- **WHEN** `kubectl get service core -n demo-staging -o yaml` (and the other alias Services) runs -- **THEN** each `spec.externalName` points at `*.demo-staging.svc.cluster.local`, matching the pods that run in that namespace. 
- -### Requirement: Demo-staging ingress and DNS use demo-staging.serviceradar.cloud -The demo-staging overlay MUST publish an ingress that front-ends the namespace via `demo-staging.serviceradar.cloud`, carries a unique TLS secret, and exposes the same path rules as the primary demo ingress so any UI/API smoke tests behave the same way. - -#### Scenario: External-DNS annotation advertises the new hostname -- **GIVEN** the ingress manifest inside `k8s/demo/staging` -- **WHEN** it is inspected -- **THEN** it sets `external-dns.alpha.kubernetes.io/hostname: "demo-staging.serviceradar.cloud"` and the TLS block lists `demo-staging.serviceradar.cloud` so cert-manager knows which certificate to request. - -#### Scenario: HTTP routing matches the demo ingress -- **GIVEN** `kubectl get ingress serviceradar-ingress -n demo-staging -o yaml` -- **WHEN** you compare its `spec.rules[].http.paths` with the demo ingress -- **THEN** it exposes the `_next`, `/api/stream`, `/api/*`, `/auth`, and `/` routes so web, Kong, and core all receive traffic just like they do in the demo namespace. - -### Requirement: Deployment docs and tooling cover demo-staging -We MUST document how to deploy, configure secrets, and validate workloads in the new namespace so operators can spin it up repeatedly without reverse-engineering the manifests. - -#### Scenario: Deploy script or README describes demo-staging steps -- **GIVEN** a teammate reads `k8s/demo/README.md`, `k8s/demo/deploy.sh`, or a sibling document dedicated to the new environment -- **WHEN** they follow the documented steps -- **THEN** they learn how to create the `demo-staging` namespace, generate the required secrets/configmap, run `kubectl apply -k k8s/demo/staging`, and validate that `https://demo-staging.serviceradar.cloud` answers. diff --git a/openspec/changes/add-demo-staging-environment/tasks.md b/openspec/changes/add-demo-staging-environment/tasks.md deleted file mode 100644 index 9387df708..000000000 --- a/openspec/changes/add-demo-staging-environment/tasks.md +++ /dev/null @@ -1,13 +0,0 @@ -## 1. Mirror demo manifests into demo-staging -- [x] 1.1 Clone the `k8s/demo/prod` overlay into `k8s/demo/staging` (or otherwise sync their resource lists) so `kustomize build k8s/demo/staging` renders the same workloads as prod. -- [x] 1.2 Update the `k8s/demo/staging/kustomization.yaml`, `namespace.yaml`, and every manifest under that overlay so the namespace, labels, and service selectors read `demo-staging` instead of `demo` (includes `service-aliases`, `ExternalName` targets, and any hard-coded namespace strings embedded in YAML or scripts). -- [x] 1.3 Adjust ingress resources and annotations to use `demo-staging.serviceradar.cloud` (annotations, TLS secret name, host rules, certificate references) and ensure the TLS secret reference matches the certificate that will back the new hostname. - -## 2. Tooling + documentation updates -- [x] 2.1 Update `k8s/demo/README.md`, `DEPLOYMENT.md`, or create a sibling README under `k8s/demo-staging/` to describe the new environment, prerequisites, and how to deploy/validate it. -- [x] 2.2 Teach `k8s/demo/deploy.sh` (or create a `demo-staging` variant) to accept an environment flag so operators can run a single command to apply the new manifest set to the `demo-staging` namespace with the correct secrets/configmap payloads. 
-- [x] 2.3 Document any DNS/cert-manager prerequisites for `demo-staging.serviceradar.cloud` (external-dns annotation, TLS secret provisioning) inside the repo so ops knows what to configure before applying the manifests. - -## 3. Validation -- [x] 3.1 Add a validation step (docs snippet or CI note) that runs `kustomize build k8s/demo/staging | kubeconform` (or `kubectl apply --dry-run`) so we can prove the manifests render successfully. -- [x] 3.2 Capture smoke-test steps for the new namespace (e.g., `kubectl -n demo-staging get ingress,deployments`, hitting `https://demo-staging.serviceradar.cloud/healthz`) once the configuration is applied. diff --git a/openspec/changes/add-e2e-cardinality-checks/proposal.md b/openspec/changes/add-e2e-cardinality-checks/proposal.md deleted file mode 100644 index 4dc8e05f2..000000000 --- a/openspec/changes/add-e2e-cardinality-checks/proposal.md +++ /dev/null @@ -1,68 +0,0 @@ -# Change: Add E2E Cardinality Health Checks for CI/CD Pipeline - -## Why - -After the DIRE (Device Identity and Reconciliation Engine) refactor, we need automated verification that device cardinality remains stable at ~50k devices. Currently, there's no automated way to verify post-deployment that: -1. The device count matches expected baseline -2. No duplicate devices exist (by strong identifier) -3. The system is healthy enough to proceed in a release pipeline - -Manual verification after each deployment is error-prone and doesn't scale with CI/CD automation. - -## What Changes - -### 1. Health Check Endpoint -Add a `/health/cardinality` endpoint to the core service that returns: -- Current device count from CNPG -- Current device count from in-memory registry -- Drift between the two -- Duplicate check results (distinct armis_device_id vs total count) -- Pass/fail status based on configurable thresholds - -### 2. Integration Test Job -Create a Kubernetes Job that can be run post-deployment to validate cardinality: -- Queries the health endpoint -- Validates counts against expected baseline (configurable) -- Returns exit code 0 on pass, non-zero on failure -- Outputs structured JSON for CI parsing - -### 3. GitHub Actions Integration -Add workflow that: -- Triggers on release/tag -- Builds with Bazel, gets git SHA from BuildBuddy -- Deploys to demo namespace via ArgoCD with correct image tags -- Waits for ArgoCD sync to complete -- Runs the cardinality check job -- Fails the pipeline if cardinality check fails - -### 4. ArgoCD Application Configuration -Configure ArgoCD to: -- Use Helm chart from repo -- Accept image tag overrides from CI -- Sync to demo namespace -- Report sync status for CI polling - -## Impact - -- **Affected specs**: NEW `e2e-health-checks` capability -- **Affected code**: - - `pkg/core/api/` - new health endpoint - - `helm/serviceradar/` - cardinality check job template - - `.github/workflows/` - release pipeline with E2E checks - - `argocd/` - ArgoCD application manifests -- **Risk**: Low - additive changes, doesn't modify core device processing -- **Dependencies**: ArgoCD installed in cluster, GitHub Actions runners with kubectl access - -## Trade-offs Considered - -### Option A: Health Endpoint vs Direct DB Query -- **Chosen**: Health endpoint -- **Rationale**: Endpoint can be reused by monitoring/alerting, not just CI. Also tests the full stack (API layer working). 
- -### Option B: Kubernetes Job vs GitHub Action Script -- **Chosen**: Kubernetes Job triggered by GitHub Actions -- **Rationale**: Job runs inside cluster with proper RBAC, no need to expose endpoints externally. GH Action just triggers and waits. - -### Option C: ArgoCD vs Direct Helm Deploy from CI -- **Chosen**: ArgoCD -- **Rationale**: GitOps pattern, ArgoCD handles rollback, sync status, and audit trail. CI just updates image tags and triggers sync. diff --git a/openspec/changes/add-e2e-inventory-validation/proposal.md b/openspec/changes/add-e2e-inventory-validation/proposal.md deleted file mode 100644 index 34da2041d..000000000 --- a/openspec/changes/add-e2e-inventory-validation/proposal.md +++ /dev/null @@ -1,42 +0,0 @@ -# Change: Add End-to-End Inventory Count Validation - -## Why - -Recent regressions in the Device Registry (specifically around IP canonicalization, merges, and churn handling) have caused significant "inventory collapse" where 50,000+ input devices resulted in only ~360 or ~49,000 stored devices. - -These issues were only detected after deployment to the demo environment. We need a distinct, automated Gatekeeper that guarantees **Input Cardinality ≈ Output Cardinality** for our standard Faker dataset. - -If the Faker generates 50,000 unique Armis devices, the database **MUST** contain 50,000 unique canonical records. Any deviation indicates a logic bug in identity resolution or deduplication. - -## What - -Implement a dedicated End-to-End (E2E) test suite that: -1. Connects to a live ServiceRadar environment (specifically targeting the `srql-fixtures` namespace or a dedicated CI ephemeral stack). -2. Triggers/Waits for the "Faker" agent to complete a full synchronization of its 50k device dataset. -3. Directly queries the CNPG database (`unified_devices`) to verify the count of canonical devices. -4. Asserts that `COUNT(*) >= 50,000` (or exact match if environment is isolated). -5. Validates no "Black Hole" merges (e.g., checks for high counts of `_merged_into` pointing to non-existent targets). - -## How - -### 1. Test Harness (`tests/e2e/inventory`) -Create a Go-based integration test suite that uses: -- **Core Client:** To query API status and potentially trigger syncs. -- **DB Client:** To execute SQL assertions against CNPG. -- **Environment Config:** `SR_E2E_API_URL`, `SR_E2E_DB_DSN`. - -### 2. Execution Flow -1. **Setup:** Ensure target DB is clean (or account for baseline). -2. **Trigger:** Wait for Faker agent to report "Sync Complete" or monitor `device_ingest_count` metric. -3. **Stabilize:** Wait for async queues (AGE graph, search index) to drain (optional, mostly concerned with Postgres persistence here). -4. **Assert:** - - `SELECT COUNT(*) FROM unified_devices WHERE (metadata->>'_merged_into' IS NULL OR metadata->>'_merged_into' = device_id) AND ...` - - Expect `50,002` (Faker fixed seed count). -5. **Diagnose:** If count fails, run diagnostic queries (top merge targets, orphan tombstones) and dump to logs. - -### 3. CI Integration -- Add a Github Action / BuildBuddy step that runs this test against the `srql-fixtures` cluster (or Kind) on nightly builds or pre-release tags. - -## Success Criteria -- The test passes effectively on the current `demo` namespace (proving the fix works). -- The test fails if we re-introduce the "Weak vs Strong" merge bug (proving it catches regressions). 
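A rough sketch of the count assertion and one of the diagnostic queries described in the How section above; illustrative only, since the proposal's full canonical-device predicate is elided and likely carries additional conditions (partition scoping, tombstone filters):

```sql
-- Canonical device count: expect the Faker baseline (e.g. 50,002 in an isolated environment).
SELECT COUNT(*) AS canonical_devices
FROM unified_devices
WHERE metadata->>'_merged_into' IS NULL
   OR metadata->>'_merged_into' = device_id;

-- Diagnostic: top merge targets, to spot "black hole" merges when the count is off.
SELECT metadata->>'_merged_into' AS merge_target,
       COUNT(*)                  AS merged_count
FROM unified_devices
WHERE metadata->>'_merged_into' IS NOT NULL
  AND metadata->>'_merged_into' <> device_id
GROUP BY 1
ORDER BY merged_count DESC
LIMIT 10;
```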
diff --git a/openspec/changes/add-e2e-inventory-validation/tasks.md b/openspec/changes/add-e2e-inventory-validation/tasks.md deleted file mode 100644 index cca4f7eda..000000000 --- a/openspec/changes/add-e2e-inventory-validation/tasks.md +++ /dev/null @@ -1,20 +0,0 @@ -## 1. Test Harness Implementation - -- [x] Create `tests/e2e/inventory` package. -- [x] Implement `InventoryValidator` struct with DB and API clients. (Implemented as direct test function for simplicity) -- [x] Add `ValidateCanonicalCount(minCount int)` method. (Implemented as assertion in test) -- [x] Add `DiagnoseInventoryCollapse()` method to log top merge targets and tombstone chains. - -## 2. Faker Integration - -- [ ] Ensure Faker agent exposes a "Sync Status" metric or API endpoint we can poll. -- [x] Alternatively, implement a "wait for quiet" logic based on ingestion metrics. (Implemented `Eventually` poll on DB count) - -## 3. Local/Dev Verification - -- [ ] Create a runbook/script to run the test against `kubectl port-forward` of the demo environment. -- [ ] Verify it passes against the current fixed deployment. - -## 4. CI Automation - -- [ ] Define the CI job (GitHub Actions or BuildBuddy) to deploy the stack (if not using persistent fixture) and run the test. diff --git a/openspec/changes/add-identity-reconciliation-engine/design.md b/openspec/changes/add-identity-reconciliation-engine/design.md deleted file mode 100644 index 523e66c7d..000000000 --- a/openspec/changes/add-identity-reconciliation-engine/design.md +++ /dev/null @@ -1,26 +0,0 @@ -## Context -IP churn created duplicate devices because weak identifiers were promoted as durable records. We need a cross-service Identification & Reconciliation Engine (IRE) that ingests sightings, correlates identifiers, and promotes/merges devices per policy with auditability and safe rollout. - -## Goals / Non-Goals -- Goals: formalize network sighting lifecycle; introduce identifier/fingerprint indexing; policy-driven promotion/merge; subnet-aware TTLs; observability/audit; feature-gated rollout with shadow evaluation. -- Non-Goals: automatic subnet classification (manual config first); new external discovery sources beyond existing sweep/agents/sync; SIEM/third-party export; destructive cleanup of legacy data without audit. - -## Decisions -- Data model: add `network_sightings`, `device_identifiers`, `fingerprints`, `subnet_policies`, `sighting_events`, `merge_audit` with indexes for IP/subnet/status, identifier uniqueness, and fingerprint lookups. -- Confidence tiers: Tier 3 (sightings) held with TTL; Tier 2 promotions require policy match (persistence/fingerprint/hostname/subnet rules); Tier 1 anchored by strong IDs (MAC, serial, agent, cloud/ext IDs) absorbs merges. -- Ingestion responsibility: sweep/poller/agents/sync emit sightings + identifiers; registry owns reconciliation (caches, scoring, promotion/merge, audit); reaper enforces TTL by subnet profile. -- Feature gating/rollout: helm/flag toggles for sightings-only ingestion, fingerprinting, promotion automation; start shadow-mode promotion with logging; enable partial unique constraints after stability. - -## Risks / Trade-offs -- Over-promotion/false merges → mitigate with multi-signal scoring, shadow mode, manual promotion UI, reversible soft-merge markers. -- Under-promotion/stale sightings → metrics/alerts on aged sightings; operator overrides; policy tuning. -- Performance on churny subnets → batch reconciliation with indexes and caches; rate limits; background workers. 
-- Operational complexity → centralize policy defaults, docs/runbooks, and dashboards; prefer simple JSON rules per subnet. - -## Migration Plan -1) Add schema + indexes; seed default subnet policies. 2) Ship ingestion split with flag (sightings store) while keeping legacy path toggleable. 3) Enable reaper v2 for sightings only. 4) Run promotion/merge in shadow mode with metrics/audit. 5) Backfill identifiers/fingerprints and merge existing duplicates with audit trail. 6) Enable automated promotion per subnet class; then tighten partial unique constraints. 7) Remove legacy IP-as-ID paths except where policy allows. - -## Open Questions -- Default TTLs per class (guest/dynamic/static) for first rollout? -- Do we require hostname + fingerprint for auto-promotion in dynamic subnets or allow persistence-only? -- How to surface operator overrides in API contracts (PATCH sighting vs dedicated promotion endpoint)? diff --git a/openspec/changes/add-identity-reconciliation-engine/proposal.md b/openspec/changes/add-identity-reconciliation-engine/proposal.md deleted file mode 100644 index adca03257..000000000 --- a/openspec/changes/add-identity-reconciliation-engine/proposal.md +++ /dev/null @@ -1,16 +0,0 @@ -# Change: Add Identity & Reconciliation Engine with Network Sightings - -## Why -Device duplication and IP churn highlight that IP-as-identity is insufficient. We need a formal Identification & Reconciliation Engine (IRE) that treats network sightings separately from durable devices, promotes only when policies are met, and merges deterministically when strong identifiers arrive. - -## What Changes -- Add network sighting lifecycle (ingest, TTL, promotion) with subnet-aware policies. -- Introduce identifier indexing (strong/middle/weak) and reconciliation engine to promote/merge devices. -- Add schema for sightings, identifiers, fingerprints, policies, audits; update sweep/agents/registry/sync paths to use it. -- Expose API/UI surfaces for sightings, promotions, policies, and merge/audit visibility. -- Ship metrics/alerts and rollout gating (feature flags, reaper profiles). -- Enforce faker/DIRE guardrails: strong-ID merges must absorb IP churn without inflating device counts, baseline faker datasets stay at 50k (+internal), and promoted sightings start unavailable until real probes flip availability. - -## Impact -- Affected specs: `device-identity-reconciliation` -- Affected code: registry/device identity resolver, sweep/poller ingestion, sync integrations, CNPG schema/migrations, API/UI, Helm values, metrics/alerts, background jobs/reapers. diff --git a/openspec/changes/add-identity-reconciliation-engine/specs/device-identity-reconciliation/spec.md b/openspec/changes/add-identity-reconciliation-engine/specs/device-identity-reconciliation/spec.md deleted file mode 100644 index 0f429d38f..000000000 --- a/openspec/changes/add-identity-reconciliation-engine/specs/device-identity-reconciliation/spec.md +++ /dev/null @@ -1,85 +0,0 @@ -## ADDED Requirements -### Requirement: Network Sightings Lifecycle -The system SHALL persist network sightings as low-confidence, partition-scoped observations with source, timestamps, subnet, TTL expiry, and metadata (DNS/ports/fingerprint status), keeping at most one active sighting per partition+IP. 
- -#### Scenario: Record and refresh a sighting -- **WHEN** a sweep/poller/agent reports a sighting for an IP without strong identifiers -- **THEN** an active sighting record is created or refreshed with updated last_seen and TTL without creating a UnifiedDevice - -### Requirement: Identifier Indexing and Strong-ID Merge -The system SHALL index strong (MAC, serial, agent ID, cloud/external IDs) and middle (hostname, fingerprint hash) identifiers, and SHALL attach sightings or devices to the existing UnifiedDevice when a strong identifier matches, merging weaker records instead of generating a new device. - -#### Scenario: Strong ID arrives after sighting -- **WHEN** a MAC or external ID is observed for an IP that already has an active sighting or Tier 2 device -- **THEN** the sighting/device is merged into the canonical device keyed by that strong identifier, and the canonical device retains history and identifiers - -### Requirement: Policy-Driven Promotion -The system SHALL promote a sighting to a UnifiedDevice only when subnet policy criteria are met (e.g., persistence duration, fingerprint/hostname confidence, allow-IP-as-ID flag) and SHALL keep or block promotion when criteria fail. - -#### Scenario: Dynamic subnet requires persistence + fingerprint -- **WHEN** a sighting in a dynamic subnet lacks fingerprint/hostname after the persistence window -- **THEN** it remains a sighting and is not promoted, and promotion is deferred until policy conditions are satisfied - -### Requirement: Reaper and TTL Enforcement -The system SHALL enforce per-subnet TTLs for sightings and low-confidence devices, expiring them when they exceed policy limits while leaving promoted Tier 1 devices untouched. - -#### Scenario: Expire stale guest sighting -- **WHEN** a guest-subnet sighting exceeds its configured TTL without promotion -- **THEN** the reaper marks it expired and removes it from active listings without affecting devices in other tiers - -### Requirement: Auditability and Metrics -The system SHALL record audit events for promotion, merge, and expiry decisions and SHALL expose metrics for sightings, promotions, merges, reaper actions, and policy blocks. - -#### Scenario: Promotion audit trail -- **WHEN** a sighting is promoted or merged -- **THEN** an audit event is written with decision reason, identifiers used, and acting policy, and metrics counters are incremented - -### Requirement: Sightings UI/API Separation and Overrides -The system SHALL expose API/UI views that list sightings separately from device inventory and SHALL allow authorized operators to promote, dismiss, or override policy for individual sightings with audit logging. 
- -#### Scenario: Operator promotes a sighting -- **WHEN** an operator issues a promotion action on a sighting via API/UI -- **THEN** the system creates/attaches to the appropriate device per identifiers, records the override in audit logs, and updates listings so the sighting no longer appears active - -#### Scenario: Operator sees promotion context -- **WHEN** a sighting is displayed in the UI -- **THEN** the UI highlights the identifiers present (hostname/MAC/fingerprint), shows the active policy state (e.g., promotion disabled or awaiting thresholds), and explains why it remains a sighting - -#### Scenario: Paginate through active sightings -- **WHEN** an operator has more active sightings than the current page size -- **THEN** the API/UI return total counts and support `limit`/`offset` pagination so the operator can page through all sightings - -### Requirement: Promotion Lineage Visibility -The system SHALL surface on device detail views when and how a device was promoted (source sighting ID, time, policy/override) so operators can audit identity assignment. - -#### Scenario: View promotion history on device -- **WHEN** an operator opens a device detail page -- **THEN** they can see promotion metadata including the originating sighting (if applicable), promotion timestamp, and whether it was auto, policy-driven, or manual override - -### Requirement: Strong-ID Merge Under IP Churn -The system SHALL treat strong identifiers (e.g., MAC, Armis ID, NetBox ID) as canonical across IP churn, merging repeated sightings/updates that share those identifiers into a single device and keeping inventory within the expected strong-ID cardinality (e.g., 50k faker devices plus internal services). - -#### Scenario: Faker IP shuffle does not inflate inventory -- **WHEN** multiple sightings arrive over time for the same `armis_device_id` or MAC but with different IPs/hostnames -- **THEN** the reconciliation engine attaches them to the existing canonical device instead of creating new devices, and total device inventory stays within the configured strong-ID baseline tolerance - -### Requirement: Sweep Sightings Enrich Strong-ID Devices -The system SHALL merge sweep/poller sightings whose IP matches an existing Tier 1 UnifiedDevice anchored by strong identifiers, treating the sighting as availability/port enrichment instead of leaving it pending in the sightings store. - -#### Scenario: Sweep sighting attaches to canonical device -- **WHEN** a sweep sighting arrives for an IP that maps to exactly one canonical device in the partition (keyed by strong identifiers and without conflicting identifiers) -- **THEN** the sighting is absorbed into that device, availability/port data is recorded on the device, an audit entry is written, and the sighting no longer remains active - -### Requirement: Promotion Availability Defaults -The system SHALL mark devices promoted from sightings as unavailable/unknown until a successful health probe is ingested and SHALL NOT mark them available solely because a sighting was promoted. 
- -#### Scenario: Unreachable faker devices stay unavailable -- **WHEN** a sighting with no successful sweep/agent availability is promoted to a device -- **THEN** the resulting device remains unavailable (or unknown) and only flips to available after a positive probe result is processed - -### Requirement: Cardinality Drift Detection -The system SHALL surface metrics/alerts when reconciled device counts deviate beyond a configurable tolerance from the strong-identifier baseline and SHALL block or rate-limit further promotion when drift is detected until operators acknowledge/override. - -#### Scenario: Device count exceeds baseline tolerance -- **WHEN** the reconciled device inventory exceeds the configured baseline (e.g., 50k faker devices) by more than the tolerance for a sustained window -- **THEN** an alert is emitted and promotion is paused or gated until the drift is addressed or explicitly overridden diff --git a/openspec/changes/add-identity-reconciliation-engine/tasks.md b/openspec/changes/add-identity-reconciliation-engine/tasks.md deleted file mode 100644 index ba09ba067..000000000 --- a/openspec/changes/add-identity-reconciliation-engine/tasks.md +++ /dev/null @@ -1,25 +0,0 @@ -## 1. Implementation -- [x] 1.1 Finalize data model and migrations for sightings, identifiers, fingerprints, subnet policies, and audit tables with required indexes/constraints. -- [x] 1.2 Add feature flags and Helm values for IRE, promotion thresholds, fingerprinting, and reaper profiles with safe defaults. -- [x] 1.3 Update sweep/poller/agent/sync ingestion to emit network sightings (partition/subnet, weak+middle signals) instead of creating devices when no strong ID. -- [x] 1.4 Implement registry IRE core: identifier caches, policy evaluation, promotion/merge scoring, identifier upserts, and deterministic device assignment. -- [x] 1.5 Implement policy-driven reaper for sightings and low-confidence devices, respecting subnet profiles and audit logs. -- [x] 1.6 Add API/UI surfaces for sightings, promotion queue, manual overrides, subnet policies, and merge/audit history. -- [x] 1.7 Add metrics, logs, and alerts for sightings, promotions, merges, reaper actions, and cache health; update dashboards. -- [ ] 1.8 Migration/backfill: seed subnet policies, reconcile/merge existing duplicates with audit, and ensure rollback plan. -- [ ] 1.9 Tests: unit, integration, and load for ingestion → promotion/merge flow; shadow-mode validation in demo before enabling automation. -- [x] 1.10 Expose identity reconciliation config via API/UI with KV-backed edits (flags, promotion, reaper, fingerprinting) and validation. -- [x] 1.11 Improve sightings UX: show why each sighting is pending (policy state/identifiers), add pagination/totals, and capture device promotion lineage in device detail views. -- [x] 1.12 Clamp faker/DIRE inputs: enforce deterministic 50k IP/hostname pairs with persisted dataset reuse, prevent IP shuffle from expanding the address set, and alert when cardinality drifts beyond tolerance. -- [x] 1.13 Fix promotion availability semantics: promoted sightings must start unavailable/unknown until probes report health; wire metrics to catch false-positive availability. -- [ ] 1.14 Add regression tests that ingest the faker dataset end-to-end (sightings → promotion) and assert device counts stay at 50k (+internal) with unreachable devices remaining unavailable. 
-- [x] 1.15 Publish Prometheus alert templates for identity drift/promotion metrics and include in monitoring bridge change to keep identity telemetry consumable. -- [x] 1.16 Drift mitigations: disable fingerprint gating when fingerprinting is off, pin faker Helm values to non-expanding IP shuffle defaults, and retag demo images (sha-13d9cc627541190980bbad253ae6b3484a2648a0) to keep counts anchored. -- [ ] 1.17 Sweep sighting enrichment merge: when a sweep sighting IP matches a Tier 1 canonical device, merge it instead of leaving it pending, apply availability/port data, emit audit/metrics, and drain the existing ~50k backlog of sweep sightings. - -## Deployment status -- Built/pushed faker with non-expanding IP shuffle: `ghcr.io/carverauto/serviceradar-faker:sha-f29d4f40c12c4a560dfa5703d451352829637a1f` (digest `sha256:70248044ebb68d0a5dd32959cd089f06494c101b830777bae5af6c13090628f3`) and updated Helm to pin it. -- Added identity config API/UI and warning logs when RequireFingerprint is auto-disabled. -- Helm values now use an `appTag` anchor; promotion stays disabled with `sightingsOnlyMode=true` so sweeps are held as sightings until strong IDs arrive. -- Registry now always treats sweep payloads as sightings; strong-ID updates bypass IP dedupe and IP fallback, preventing Armis collisions on shared IPs. Added IP-fallback tests for strong vs weak updates. -- Built/pushed new core image `ghcr.io/carverauto/serviceradar-core:sha-a8d4913e1788` (digest `sha256:a8d4913e178827ba853cea956b79732cacae90c25055d1c8cc76dc9fa340fd7a`) and pinned Helm to it. CNPG tables truncated, core/sync/poller restarted, and ingest rerun. Demo now reports `unified_devices=50,002` (50k faker + 2 internal) with all 50k Armis IDs preserved. diff --git a/openspec/changes/add-inventory-groups-and-dashboard-customization/design.md b/openspec/changes/add-inventory-groups-and-dashboard-customization/design.md new file mode 100644 index 000000000..74f50d1ad --- /dev/null +++ b/openspec/changes/add-inventory-groups-and-dashboard-customization/design.md @@ -0,0 +1,114 @@ +# Design: Inventory groups + customizable dashboards + +## Overview +This change introduces: +- a first-class grouping model for **devices** and **services**, +- customizable dashboards that can render **group health widgets**, +- admin-only **on-demand network sweeps**, and +- **RBAC** to safely gate admin workflows. + +Group health is computed from **rollups** where possible and kept up to date via **background jobs**. 
+ +## Data Model (Phoenix-owned) + +### `inventory_groups` +- `id` (UUID) +- `name` (string) +- `group_type` (enum: `device` | `service`) +- `parent_id` (UUID nullable) – hierarchy +- `is_system` (boolean) – used for root `Inventory` +- `metadata` (jsonb) – capability flags (ex: `has_sysmon`, `has_snmp`) +- `inserted_at` / `updated_at` + +### `inventory_group_memberships` (static) +- `group_id` +- `member_type` (`device` | `service`) +- `device_id` (nullable) +- `service_id` (nullable) – representation TBD (may be composite) +- `source` (`static` | `dynamic`) +- unique constraints per member + +### `inventory_group_rules` (dynamic) +- `group_id` +- `rule_type` (`srql` | `json`) +- `query` (text) – SRQL or serialized rule definition +- `last_run_at`, `last_run_status`, `last_match_count`, `last_error` +- `enabled` (boolean) + +### Dashboards +Store user-owned dashboards and widgets: +- `dashboards`: `id`, `user_id`, `name`, `is_default`, `layout_version`, timestamps +- `dashboard_widgets`: `id`, `dashboard_id`, `kind`, `title`, `config` (jsonb), `position` (jsonb), timestamps + +### RBAC (initial) +Keep RBAC intentionally simple but enforce it everywhere: +- `ng_users.role` (enum: `admin` | `operator` | `viewer`) OR a join table if we need multi-role later. +- Authorization is enforced in Phoenix router/live_session boundaries; UI hiding is secondary. +- Admin-only areas include: group CRUD/rules, bulk edits, on-demand sweeps, and dashboard templates that affect shared views. + +### On-demand sweeps +Track admin-triggered sweep runs and their lifecycle: +- `ondemand_sweep_runs`: + - `id` (UUID), `requested_by_user_id`, `poller_id`, optional `agent_id` + - `targets` (CIDR/list), `options` (jsonb), `status`, `started_at`, `finished_at`, `error` + - `retention_days` (int, default 30, bounded) +- `ondemand_sweep_results`: + - `run_id`, `host_ip`, `network_cidr`, `icmp_available`, open ports, metadata, timestamps + - May reference or mirror existing `sweep_host_states` depending on final storage strategy. + +## Dynamic Groups: Criteria Language +Preferred option: store SRQL queries as the rule format. +- Pros: reuses existing translator, familiar to operators, flexible. +- Safety: rules must be **bounded** (limit), have a timeout, and only target allowed entities. +- Rule evaluation runs asynchronously and materializes membership rows so the UI does not run heavy criteria queries inline. + +## Group Hierarchy Semantics +- Every device/service is conceptually under the system root `Inventory`. +- Hierarchy is for presentation and filtering. Membership can be queried as: + - `direct` members (explicit rows) + - `effective` members (includes descendants) – computed in queries, not duplicated. +- Prevent cycles via DB constraints + application validation. + +## Rollups Strategy (Timescale) +Avoid per-group DDL by creating shared rollups that are keyed by `group_id`. +- Example approach: + - Join `inventory_group_memberships` to telemetry hypertables by `device_id`. + - Use `time_bucket('5m', timestamp)` with `group_id` in the GROUP BY. + - Compute summary aggregates (avg/max/p95 where applicable). + +Notes: +- Timescale continuous aggregates have limitations with joins; if join-based CAGGs are not viable, fallback to: + - materialized rollup tables updated by Oban on a schedule. + +## Background Jobs (Oban) +Oban is used to keep dynamic membership and rollups current without impacting request latency. +- `GroupRuleEvalWorker`: evaluate rule → upsert memberships (source=`dynamic`). 
+- `GroupCapabilityWorker`: compute flags like `has_sysmon`/`has_snmp` based on recent telemetry. +- `GroupRollupRefreshWorker`: request refresh/backfill after group changes or on schedule. + +### On-demand sweeps pipeline (Oban + KV/datasvc + poller/agent) +Constraints: +- `web-ng` cannot (and should not) call agents/checkers directly from request handlers. +- Pollers are the orchestrators that already talk to agents. + +Proposed flow: +1. Admin schedules a sweep from `web-ng` by selecting a `poller_id` (and optionally `agent_id`) and providing targets/options. +2. `web-ng` creates an `ondemand_sweep_runs` record (`status=queued`) and enqueues an Oban job. +3. Oban worker dispatches a sweep request via datasvc/KV (NATS-backed): + - write a job payload under a well-known key namespace scoped by `poller_id` and/or `agent_id` (ex: `jobs/sweeps//.json`). + - include an idempotency key, TTL, and desired retention. +4. Poller watches its job namespace and, upon seeing a sweep job: + - triggers the agent sweep service, + - periodically calls `GetResults` until completion/timeout, + - persists results into CNPG tables (either `ondemand_sweep_results` or correlated rows in `sweep_host_states`). +5. `web-ng` reads run status/results from the database and renders them in the UI. + +Retention: +- Default to 30 days for on-demand sweep results, configurable per run but capped. +- Cleanup is enforced by Timescale retention (if hypertable) or by scheduled Oban cleanup. + +## UI/UX +- Devices view adds a group tree panel and multi-select/bulk actions. +- Group pages provide CRUD, membership previews, and health widgets. +- Dashboards allow user customization and pinning group health widgets. +- Add an admin “Sweeps” area to schedule and view sweep runs/results. diff --git a/openspec/changes/add-inventory-groups-and-dashboard-customization/proposal.md b/openspec/changes/add-inventory-groups-and-dashboard-customization/proposal.md new file mode 100644 index 000000000..6c1d9950e --- /dev/null +++ b/openspec/changes/add-inventory-groups-and-dashboard-customization/proposal.md @@ -0,0 +1,48 @@ +# Change: Inventory groups + dashboards + admin sweeps + RBAC + +## Why +- Operators need a scalable way to organize large inventories into meaningful hierarchies (sites, environments, vendors, roles) without relying on ad-hoc SRQL filters. +- Admins need bulk workflows (select many devices/services → edit once → assign to groups) to keep the inventory curated. +- Users want dashboards that reflect *their* operational concerns (ex: “Sysmon fleet health for Site A”), not a fixed set of global cards. +- Real-time aggregate queries over hypertables are expensive; group-level health should be derived from precomputed rollups where possible. +- Operators need an admin-only, audited way to trigger **on-demand network sweeps** from a chosen poller without shell access. +- As we add powerful admin features (groups, bulk edit, sweeps), we must enforce **role-based access control (RBAC)** consistently across UI and API routes. + +## What Changes +- **Groups (Devices + Services):** + - Add hierarchical groups with a built-in root group `Inventory`. + - Support both **static membership** (manual assignments) and **dynamic membership** (criteria-driven rules). +- **Bulk editing:** + - Admin UI to select one/many devices (and later services) and apply changes in one place (group assignments + basic metadata/fields). +- **Dynamic groups:** + - Admins can define criteria (partition, vendor, model, OS, tags, etc.) 
and the system keeps membership up to date automatically. + - Criteria is stored declaratively and evaluated asynchronously. +- **Dashboard customization:** + - Users can create dashboards, add/reorder/remove widgets, and save configurations. + - Add group-scoped widgets (ex: CPU/Mem/Disk utilization summary, service availability) that can be pinned to dashboards. +- **Rollups + background jobs:** + - Add/extend rollups to support fast group-level health queries (sysmon utilization and service availability). + - Add background jobs (Oban) to: + - Recompute dynamic group membership. + - Detect “metrics availability” for a group (ex: sysmon present) and enable appropriate widgets. + - Trigger rollup refresh/backfill when group membership or rules change. +- **On-demand network sweeps (admins only):** + - Add an admin UI to schedule a sweep run from a selected poller (and optionally a target agent), with parameters like CIDR/targets, scan options, and retention. + - Persist sweep run metadata and results for a bounded retention window (default 30 days; configurable). + - Execute sweeps asynchronously via Oban, dispatching jobs through the existing NATS-backed configuration/control plane (datasvc/KV) so `web-ng` does not need direct agent connectivity. +- **RBAC:** + - Introduce a roles/permissions model in `web-ng` and enforce it at the router/controller layer. + - Gate group management, bulk edits, and on-demand sweeps behind admin-only permissions. + +## Non-Goals +- No cut-over/migration from the legacy React UI. +- No breaking changes to ingestion semantics (core/pollers/checkers keep writing the same telemetry). +- No requirement to create one Timescale CAGG *per group* at runtime; rollups MAY be implemented as shared aggregates keyed by `group_id` (preferred). +- No fully general ABAC system. RBAC is introduced to safely support admin-only workflows; finer-grained policy can follow. +- `web-ng` MUST NOT establish direct RPC connections to agents/checkers from request handlers; all actions are asynchronous and mediated by pollers/datasvc. + +## Impact +- **`web-ng/` (Phoenix):** new contexts, LiveViews, Ecto migrations (groups/dashboards/Oban), and UI changes to the Devices view for group hierarchy + bulk actions. +- **CNPG/Timescale:** new rollup objects and/or policies for group-level health queries. +- **SRQL:** MAY be used as the criteria language for dynamic groups; evaluation must remain safe and bounded. +- **KV/datasvc + pollers/agents:** add a job dispatch/watch pattern for on-demand sweeps, plus a result persistence path. diff --git a/openspec/changes/add-inventory-groups-and-dashboard-customization/specs/cnpg/spec.md b/openspec/changes/add-inventory-groups-and-dashboard-customization/specs/cnpg/spec.md new file mode 100644 index 000000000..9667acbb2 --- /dev/null +++ b/openspec/changes/add-inventory-groups-and-dashboard-customization/specs/cnpg/spec.md @@ -0,0 +1,24 @@ +# cnpg Specification (Delta): Group health rollups + +## ADDED Requirements + +### Requirement: Rollups support group-level health widgets +The system SHALL provide Timescale-backed rollups that enable fast group-level health widgets (utilization and availability) without requiring per-group DDL at runtime. + +#### Scenario: Group utilization rollup exists +- **GIVEN** the rollups migrations for group health have been applied +- **WHEN** an operator inspects `timescaledb_information.continuous_aggregates` +- **THEN** a rollup exists that can answer utilization queries grouped by `group_id` over time buckets. 
+ +#### Scenario: Rollups have refresh policies +- **GIVEN** group rollups exist +- **WHEN** an operator inspects `timescaledb_information.jobs` +- **THEN** refresh policies are configured for each rollup and run on a predictable cadence. + +### Requirement: On-demand sweep results are retained with predictable TTL +The system SHALL persist on-demand sweep results in a way that supports a default 30-day retention (configurable within bounds) and provides predictable cleanup. + +#### Scenario: Sweep results retention policy exists +- **GIVEN** on-demand sweep result storage has been deployed +- **WHEN** an operator inspects Timescale retention policies (or the documented cleanup job) +- **THEN** a cleanup mechanism exists that removes sweep results after the configured retention window. diff --git a/openspec/changes/add-inventory-groups-and-dashboard-customization/specs/kv-configuration/spec.md b/openspec/changes/add-inventory-groups-and-dashboard-customization/specs/kv-configuration/spec.md new file mode 100644 index 000000000..091164e05 --- /dev/null +++ b/openspec/changes/add-inventory-groups-and-dashboard-customization/specs/kv-configuration/spec.md @@ -0,0 +1,22 @@ +# kv-configuration Specification (Delta): Admin sweep job dispatch + +## ADDED Requirements + +### Requirement: Admin-triggered sweep jobs are dispatched via KV in a poller-scoped namespace +The system SHALL dispatch admin-triggered on-demand sweep requests by writing a job payload into the KV store under a well-known, poller-scoped namespace so pollers can watch and execute the jobs without `web-ng` directly calling agents. + +#### Scenario: Web schedules a sweep by writing a KV job entry +- **GIVEN** an authenticated admin schedules an on-demand sweep from poller `p1` +- **WHEN** the dispatch worker runs +- **THEN** it writes a job payload under a key namespace scoped to `p1` (ex: `jobs/sweeps/p1/.json`) +- **AND** the payload includes targets/options, a TTL, and an idempotency key. + +### Requirement: Pollers watch for sweep jobs and execute them safely +Pollers SHALL watch their sweep job namespace in KV and execute jobs with bounded concurrency, timeouts, and idempotency. + +#### Scenario: Poller executes a sweep job at most once +- **GIVEN** a sweep job is present in the poller’s KV namespace +- **WHEN** the poller processes the job and crashes/restarts mid-run +- **THEN** the poller resumes safely without duplicating the sweep execution +- **AND** it marks job completion/failure in a predictable way (DB and/or KV status key). + diff --git a/openspec/changes/add-inventory-groups-and-dashboard-customization/tasks.md b/openspec/changes/add-inventory-groups-and-dashboard-customization/tasks.md new file mode 100644 index 000000000..4853e2d33 --- /dev/null +++ b/openspec/changes/add-inventory-groups-and-dashboard-customization/tasks.md @@ -0,0 +1,83 @@ +## 1. Discovery / Decisions +- [ ] Confirm the device/service identifiers to use for membership (`device_id`, `service_id`/composite keys). +- [ ] Confirm which device fields are stable for dynamic rules (partition, vendor, model, OS, tags). +- [ ] Decide the criteria format: + - [ ] Store SRQL (recommended) vs store structured JSON rules. + - [ ] Define a bounded evaluation window and safeguards (limit, timeout, max results). +- [ ] Decide group inheritance semantics: + - [ ] Whether membership in a child implies membership in parents (default: yes via hierarchy, not duplicate rows). + - [ ] Whether dynamic groups can be children of static groups. + +## 2. 
Database (Phoenix app tables) +- [ ] Add Ecto migrations under `web-ng/priv/repo/migrations/` for: + - [ ] `inventory_groups` (hierarchy + type device/service + system root). + - [ ] `inventory_group_memberships` (static memberships). + - [ ] `inventory_group_rules` (dynamic criteria + evaluation metadata). + - [ ] `dashboards` and `dashboard_widgets` (user-owned configs). +- [ ] Add Oban tables/migrations (if not already present). +- [ ] Add indexes for scale: `(parent_id)`, `(group_type)`, `(device_id)`, `(service_id)`, `(user_id)`, etc. + +## 3. Rollups (Timescale/CNPG) +- [ ] Define rollup strategy for group health: + - [ ] Prefer shared rollups keyed by `group_id` (avoid per-group DDL). + - [ ] Define group sysmon utilization rollups (CPU/Mem/Disk) by time bucket. + - [ ] Define group service availability rollups by time bucket. +- [ ] Add migrations (CNPG or Phoenix, per ownership decision) to create rollups + refresh policies. +- [ ] Provide an operator runbook for rollup verification and refresh/backfill. + +## 4. Background Jobs (Oban) +- [ ] Add an Oban worker to evaluate dynamic group rules and upsert memberships. +- [ ] Add an Oban worker to compute “group capabilities” (ex: sysmon present) and store in group metadata. +- [ ] Add a worker to trigger rollup refresh/backfill when a group changes. +- [ ] Add safety bounds: concurrency limits, per-run cap, and instrumentation. + +## 5. UI / UX (Phoenix LiveView) +- [ ] Devices page: + - [ ] Add group hierarchy panel (root `Inventory` → child groups). + - [ ] Add “filter by group” and “show children” toggle. + - [ ] Add multi-select + bulk actions (assign/remove groups). +- [ ] Group management: + - [ ] Admin CRUD for groups and rules, with preview of dynamic membership. + - [ ] Group detail view showing members and rollup-backed health widgets. +- [ ] Dashboard customization: + - [ ] User dashboard editor to add/reorder widgets. + - [ ] Add group health widgets (utilization + availability) driven by rollups with safe fallback. + +## 6. Testing / Validation +- [ ] Unit tests for: + - [ ] Group hierarchy invariants (no cycles, root can’t be deleted). + - [ ] Dynamic rule evaluation bounds and idempotency. + - [ ] Bulk assignment semantics. +- [ ] Smoke validation queries for rollups (raw vs rollup totals over a fixed window). + +## 7. RBAC (web-ng) +- [ ] Define initial roles (ex: `admin`, `operator`, `viewer`) and the permissions needed for: + - [ ] Group CRUD and rule evaluation controls + - [ ] Bulk device/service edits + - [ ] On-demand sweep scheduling and viewing results +- [ ] Add Ecto migrations for roles/assignments (or a `roles` column on `ng_users`) and any audit tables needed. +- [ ] Enforce permissions at the Phoenix router/live_session level (admin-only routes). +- [ ] Add UI affordances (hide/disable controls) but keep server-side authorization as the source of truth. +- [ ] Add basic tests for authorization boundaries (“forbidden” for non-admin). + +## 8. 
On-demand network sweeps (admin-only) +- [ ] Decide sweep targeting model: + - [ ] Sweep from a selected `poller_id` across its configured agents + - [ ] Optionally target a specific `agent_id` (recommended for “sweep from this site”) +- [ ] Add Phoenix tables to track sweep runs: + - [ ] `ondemand_sweep_runs` (who/when/params/status, retention override) + - [ ] `ondemand_sweep_results` (summary + references to per-host results) +- [ ] Define how results are stored: + - [ ] Reuse `sweep_host_states` with job_id correlation OR + - [ ] Create a dedicated hypertable/table for on-demand results with 30-day retention (preferred for controlled TTL). +- [ ] Oban workers: + - [ ] Dispatch sweep job via datasvc/KV + - [ ] Monitor completion / timeout + - [ ] Persist results and mark run status +- [ ] UI: + - [ ] Admin page to schedule a sweep (select poller, input targets/options) + - [ ] Sweep run list + detail view (status, results, export) + - [ ] Optional embedding under a poller/agent device details page +- [ ] Retention: + - [ ] Default 30 days, configurable per run (bounded by a max) + - [ ] Cleanup job/policy and operator guidance diff --git a/openspec/changes/add-mtls-only-edge-onboarding/design.md b/openspec/changes/add-mtls-only-edge-onboarding/design.md deleted file mode 100644 index fa0ab51b7..000000000 --- a/openspec/changes/add-mtls-only-edge-onboarding/design.md +++ /dev/null @@ -1,29 +0,0 @@ -## Context -- Edge sysmon-vm checkers running on macOS arm64 or Linux need an easy bootstrap path when the customer hosts the control plane via Docker Compose on a LAN IP. -- Embedding SPIRE agents per-edge is heavy for the near-term rollout; we can reuse the Compose mTLS authority to issue per-node certs and keep SPIRE experiments optional. -- Target operator flow: issue a token, run `serviceradar-sysmon-vm --mtls --token --host --poller-endpoint 192.168.1.218:`, certs install to `/etc/serviceradar/certs`, and the checker starts with mTLS. - -## Goals / Non-Goals -- Goals: mTLS-only onboarding bundle for sysmon-vm against the Compose stack; token-driven download/installation; keep cert issuance tied to the Compose CA so poller/core trust the edge checker. -- Non-Goals: remove SPIRE from k8s, replace SPIRE long term, or stop experimenting with SPIRE ingress/agents; this is a Compose-first fallback. - -## Decisions (initial) -- Use a per-edge token to authorize bundle download from Core (or a small enrollment handler) that returns CA + client cert/key + expected endpoints. -- sysmon-vm installs bundles under `/etc/serviceradar/certs` (or equivalent writable path) and binds gRPC with that cert; poller/core are pinned to the same CA. -- Compose auto-generates a CA on first run (unless provided) and reuses it for all service leaf certs plus edge bundles; rotation is deliberate, not per-boot. -- Images are built/published (amd64) and the mTLS compose variant consumes a tagged release (`APP_TAG`), keeping SPIRE out of the compose path. - -## Open Questions -- Do we constrain tokens by IP/SAN to reduce bundle leakage, or rely on short TTL + revocation? -- Should bundle issuance live behind Core’s edge-package API or a lightweight enrollment handler in Compose? -- What is the default poller endpoint/port advertised to edge nodes (static in env vs. derived from compose metadata)? - -## Status (2025-11-29) -- mTLS compose stack is up with shared CA; SPIRE bits removed from sysmon-vm. -- Kong/web/auth are working; admin login succeeds with generated credentials. 
-- Events stream subjects now auto-managed by zen (removed manual stream init). -- OTEL collector publishes logs/metrics/traces to NATS; db-event-writer now ingests into CNPG after mTLS fix and NATS client config. -- KV config fetches fixed by regenerating datasvc cert SAN and reseeding configs (datasvc, otel, trapd, zen-consumer, netflow-consumer). -- Poller/agent now register against core after restarting with the regenerated mTLS config; service tree/admin watchers now show `docker-poller` with child `docker-agent` (agent watcher snapshot timestamp is still the older seed value). -- CNPG now runs with TLS (server cert from compose CA) and compose clients default to `sslmode=verify-full`; cert generator SAN now includes `cnpg` and `cnpg-rw` with/without `.serviceradar`. -- Proton references removed from core config; compose rewrites the DB block to CNPG + mTLS. Core is still crashing during migrations with TLS enabled (`failed to create migrations table: EOF` even after `cnpg-migrate` succeeds), so web/Nginx report 502/500 until core is healthy. diff --git a/openspec/changes/add-mtls-only-edge-onboarding/proposal.md b/openspec/changes/add-mtls-only-edge-onboarding/proposal.md deleted file mode 100644 index 763524876..000000000 --- a/openspec/changes/add-mtls-only-edge-onboarding/proposal.md +++ /dev/null @@ -1,21 +0,0 @@ -# Change: mTLS-only edge onboarding for Docker Compose - -## Why -- Embedding a SPIRE agent inside sysmon-vm is heavy for laptop/edge installs; we need a simpler, near-term path for Linux Compose + darwin/arm64 edge nodes. -- Customers want a token-based, zero/near-zero touch install for thousands of sysmon-vm checkers without introducing SPIRE agents on every host. -- The Compose stack already runs mTLS; we can reuse its CA to issue per-edge client certs and deliver them as onboarding bundles. - -## What Changes -- Add an mTLS onboarding bundle flow: operators mint a token for a sysmon-vm edge node; sysmon-vm uses `--mtls` + token + host to download a CA + client cert/key bundle and poller endpoints, installs them, and starts with mTLS. -- Teach the Docker Compose stack to auto-generate (or accept) a CA, issue leaf certs for core/poller/agent/checkers, and expose an enrollment path for per-edge sysmon-vm bundles (without SPIRE). -- Document the Linux Compose + darwin/arm64 sysmon-vm flow (e.g., target poller at `192.168.1.218:`), while keeping SPIRE ingress/agent experimentation as an optional path. -- Build/publish new images (amd64) and wire an mTLS compose variant using tagged images. - -## Status (2025-11-30) -- Built/pushed all images with `APP_TAG=sha-0bc21e5ee79be0eb143cddd6fc7601f739c39f21` and restarted the mTLS compose stack. -- sysmon-vm mTLS config is generated via config-updater; sysmon-vm at `192.168.1.218:50110` onlines successfully and sysmon metrics are now ingested under the canonical device `sr:88239dc2-7208-4c24-a396-3f868c2c9419` (UI sysmon CPU panel returns data). -- UI connectivity is healthy after the restart. Remaining open item: rotation/regeneration validation. - -## Impact -- Affected specs: edge-onboarding. -- Affected code: Core edge package/bundle delivery, sysmon-vm bootstrap CLI, Docker Compose TLS bootstrap/scripts and docs. 
diff --git a/openspec/changes/add-mtls-only-edge-onboarding/specs/edge-onboarding/spec.md b/openspec/changes/add-mtls-only-edge-onboarding/specs/edge-onboarding/spec.md deleted file mode 100644 index 39307271e..000000000 --- a/openspec/changes/add-mtls-only-edge-onboarding/specs/edge-onboarding/spec.md +++ /dev/null @@ -1,23 +0,0 @@ -## ADDED Requirements -### Requirement: mTLS-only sysmon-vm onboarding for Docker Compose -The system SHALL provide a token-based mTLS onboarding flow for sysmon-vm checkers targeting a Docker Compose deployment, without requiring SPIRE on the edge host. - -#### Scenario: Token-driven bundle download and install -- **WHEN** an operator issues an edge onboarding token for `checker:sysmon-vm` and runs `serviceradar-sysmon-vm --mtls --token --host --poller-endpoint 192.168.1.218:` on a darwin/arm64 or Linux edge host -- **THEN** sysmon-vm SHALL download an onboarding bundle (CA, client cert/key, expected endpoints) over HTTPS from Core (or the enrollment handler), install it to `/etc/serviceradar/certs` (or the configured writable path), and start with mTLS using the provided poller endpoint -- **AND** the poller/core in the Compose stack SHALL accept the sysmon-vm connection because the client certificate chains to the Compose CA. - -#### Scenario: Offline or pre-fetched bundle use -- **WHEN** an operator supplies a pre-fetched bundle via `--bundle /path/to/bundle.tar.gz` alongside `--mtls` -- **THEN** sysmon-vm SHALL validate the bundle integrity (including CA/client cert/key presence and expiry) and proceed with the same mTLS startup without contacting Core. - -### Requirement: Docker Compose CA issues edge enrollment bundles -The Docker Compose deployment SHALL generate or accept a TLS CA, issue leaf certificates for internal services, and issue per-edge sysmon-vm client certificates via onboarding tokens or bundle requests. - -#### Scenario: Compose CA generation and reuse -- **WHEN** the Compose stack starts without a pre-provided CA -- **THEN** it SHALL generate a CA once, reuse it for core/poller/agent/checker service certs, and expose an enrollment path that issues per-edge sysmon-vm bundles signed by that CA, so edge nodes can join with mTLS without SPIRE. - -#### Scenario: Controlled bundle issuance -- **WHEN** an admin requests an mTLS onboarding bundle for sysmon-vm via Core (edge package) or a Compose enrollment endpoint -- **THEN** the system SHALL enforce token validity/TTL, bind the client cert to the requested checker identity, include the CA and poller/core endpoints, and record issuance so operators can rotate or revoke the bundle if needed. diff --git a/openspec/changes/add-mtls-only-edge-onboarding/tasks.md b/openspec/changes/add-mtls-only-edge-onboarding/tasks.md deleted file mode 100644 index af7a79fe4..000000000 --- a/openspec/changes/add-mtls-only-edge-onboarding/tasks.md +++ /dev/null @@ -1,19 +0,0 @@ -## 1. Design & Decisions -- [x] 1.1 Lock mTLS bundle format and token semantics (fields, expiry, binding to edge host, SAN expectations for poller/core endpoints). -- [x] 1.2 Define Docker Compose CA generation/retention and enrollment flow (where bundles are minted, how poller/core trust the same CA, rotation story). - -## 2. Implementation -- [x] 2.1 Extend Core edge-package issuance/delivery to support mTLS bundle tokens for `checker:sysmon-vm` (serve CA + client cert/key + poller/core endpoints). 
Per-package certs now minted from the Compose CA, JSON bundle includes endpoints/server name + timestamps, and DB scan/upsert tests cover `security_mode`. -- [x] 2.2 Add sysmon-vm CLI/bootstrap path `--mtls` (env equivalent) that pulls the bundle via token/host, installs to `/etc/serviceradar/certs`, and boots mTLS to the configured poller endpoint (e.g., `192.168.1.218:`). -- [x] 2.3 Wire Docker Compose to generate/reuse the CA and issue per-edge bundles (CLI or HTTP handler), and ensure core/poller/agent certs come from the same CA. -- [x] 2.4 Document the Linux Compose + darwin/arm64 edge flow (token issuance, sysmon-vm install/run, rotation/cleanup), and note SPIRE ingress/agent as an optional path. -- [x] 2.5 Build and push amd64 images and update mTLS compose variant to consume tagged images. - -## 3. Validation -- [x] 3.1 E2E: start Compose with generated CA, issue mTLS edge token, run sysmon-vm on darwin/arm64 against `192.168.1.218:`, and verify mTLS connection to poller/core succeeds. -- Notes: Images pushed with `APP_TAG=sha-811f8732636f1a7569614850f560c74f706f8c4b`; mTLS compose stack up; sysmon-vm at `192.168.1.218:50110` is ingesting metrics into device `default:192.168.1.218` after restarting web/nginx. -- [ ] 3.2 Rotation/regeneration sanity: regenerate an edge bundle/token and confirm sysmon-vm can re-enroll without manual cleanup. - -### Notes on current blockers -- CNPG base rebuilt to 16.6 bookworm with glibc 2.41 and a runtime layer; new tag `16.6.0-sr2` removes the `GLIBC_2.38` Timescale/AGE load failure and the mTLS Compose stack now comes up cleanly with APP_TAG `sha-e5b9b615c9c17d104391d4847e1cfb757163ae15`. -- Compose CNPG logs show collation version warnings (DB created on glibc 2.31); refresh collation or wipe/reseed volume when convenient. diff --git a/openspec/changes/add-observability-timescale-rollups/design.md b/openspec/changes/add-observability-timescale-rollups/design.md new file mode 100644 index 000000000..8da60efa6 --- /dev/null +++ b/openspec/changes/add-observability-timescale-rollups/design.md @@ -0,0 +1,62 @@ +# Design: Observability Timescale rollups + +## Goals +- Make Observability KPI queries fast and stable by querying pre-aggregated rollups instead of raw hypertables. +- Align KPI semantics with the legacy UI where possible: + - Metrics: total, errors, slow, avg duration, p95 duration + - Traces: total traces, errors, avg duration, p95 duration + - Logs: counts by severity (error/warn/info/debug) + +## Data Sources +From `pkg/db/cnpg/migrations/00000000000001_schema.up.sql`: +- `logs` hypertable: + - `timestamp`, `severity_text`, `service_name`, `trace_id`, `span_id`, ... +- `otel_metrics` hypertable: + - `timestamp`, `duration_ms`, `http_status_code` (TEXT), `grpc_status_code` (TEXT), `is_slow` (BOOLEAN), `level` (TEXT), ... +- `otel_traces` hypertable: + - `timestamp`, `trace_id`, `span_id`, `parent_span_id`, `status_code` (INTEGER), `start_time_unix_nano`, `end_time_unix_nano`, ... 
+ +## KPI Semantics + +### Metrics KPIs (from `otel_metrics`) +- **Total metrics**: `count(*)` +- **Errors**: count where any of these are true: + - `level` indicates error (case-insensitive match for `error`) + - `http_status_code` is one of the legacy “error” codes (commonly `400, 404, 500, 503`) + - `grpc_status_code` indicates non-success (implementation should confirm actual encoded values in data) +- **Slow spans**: `count(*) FILTER (WHERE is_slow)` +- **Avg duration**: `avg(duration_ms)` +- **P95 duration**: Prefer `percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms)` per bucket. If this proves too expensive, evaluate `timescaledb_toolkit` (tdigest/approx percentiles) as an extension in a follow-up. + +### Traces KPIs (from `otel_traces`) +The raw `otel_traces` hypertable stores spans. For “trace count” KPIs we need a trace-like 1-row-per-trace representation. + +**Proposed definition:** treat “root spans” as trace representatives: +- A “root span” is a span with `parent_span_id IS NULL` (or empty string; validate ingestion behavior). +- Assumption: there is exactly one root span per trace. This yields a trace-like count via `count(*)` over root spans. + +Traces KPIs per bucket: +- **Total traces**: `count(*)` over root spans +- **Errors**: count of root spans where `status_code = 2` (OTel status error) +- **Avg duration (ms)**: `avg((end_time_unix_nano - start_time_unix_nano) / 1e6)` +- **P95 duration (ms)**: `percentile_cont(0.95)` over the computed duration + +If the “single root span” assumption does not hold in practice, a follow-up design should introduce a trace summary hypertable keyed by `(trace_id)` and time-bucket rollups on top of it. + +### Log KPIs (from `logs`) +- Roll up by normalized level: + - Normalize `severity_text` to lowercase, and map common variants (`warn`/`warning`) to `warn`. +- Counts per bucket and level: `count(*) FILTER (WHERE normalized_level = 'error')`, etc. + +## Rollup Grain & Policies +- Bucket size: **5 minutes** (matches other ServiceRadar telemetry rollups). +- Refresh policy: run every 5 minutes with an end offset to avoid late-arriving data (exact offsets to be tuned during implementation). +- Retention: unchanged for base hypertables; rollup retention may match the base retention window (commonly 3 days) when enabled. + +## Compatibility & Resilience +- Continuous aggregates can break after TimescaleDB extension OID changes; migrations should favor: + - Idempotent creation + - Explicit policy definitions + - An operator runbook for recreating rollups if refresh jobs fail +- This change should be compatible with (and not conflict with) the existing CAGG recovery work tracked in `fix-cnpg-continuous-aggregate-cache-error`. + diff --git a/openspec/changes/add-observability-timescale-rollups/proposal.md b/openspec/changes/add-observability-timescale-rollups/proposal.md new file mode 100644 index 000000000..6d8a165f1 --- /dev/null +++ b/openspec/changes/add-observability-timescale-rollups/proposal.md @@ -0,0 +1,25 @@ +# Change: Add TimescaleDB rollups for Observability KPIs + +## Why +- The `web-ng` Observability/Analytics KPI cards currently rely on real-time aggregate queries over high-volume hypertables (`logs`, `otel_metrics`, `otel_traces`). +- These queries are slow at scale and appear to be inconsistent with the legacy UI (ex: traces/metrics totals and error-rate calculations). +- TimescaleDB continuous aggregates provide fast, predictable KPI queries by precomputing time-bucketed summaries on a schedule. 
+ +## What Changes +- Add CNPG migrations that create TimescaleDB continuous aggregates (CAGGs) for: + - `otel_metrics` KPI rollups (total, errors, slow, avg/p95 duration) + - `otel_traces` KPI rollups (trace-like counts based on root spans, errors, avg/p95 duration) + - `logs` severity rollups (counts by level) +- Add refresh policies for each CAGG so dashboards can query rollups rather than raw hypertables. +- Document validation queries and operational guidance (refresh, job health, failure recovery). + +## Non-Goals +- No UI cut-over is required in this change: `web-ng` MAY continue querying raw hypertables until we explicitly switch it. +- No changes to telemetry ingestion or data retention policies. +- No migration of the legacy React/Next.js web stack. + +## Impact +- Affected capability: `cnpg` (new database objects + policies). +- Affected code paths (future follow-up): `web-ng` SRQL dashboards and KPI cards can be updated to query rollups for speed/correctness. +- Operational considerations: continuous aggregate policies must be monitored (job errors) and may require recreation if Timescale function OIDs change (see prior change `fix-cnpg-continuous-aggregate-cache-error`). + diff --git a/openspec/changes/add-observability-timescale-rollups/specs/cnpg/spec.md b/openspec/changes/add-observability-timescale-rollups/specs/cnpg/spec.md new file mode 100644 index 000000000..7b384997a --- /dev/null +++ b/openspec/changes/add-observability-timescale-rollups/specs/cnpg/spec.md @@ -0,0 +1,20 @@ +# cnpg Specification (Delta): Observability rollups + +## ADDED Requirements + +### Requirement: Timescale continuous aggregates for observability KPIs +The system SHALL create TimescaleDB continuous aggregates (5-minute buckets) that summarize observability KPIs from `logs`, `otel_metrics`, and `otel_traces` so dashboards can query rollups instead of raw hypertables. + +#### Scenario: Rollup objects exist after migration +- **GIVEN** a CNPG cluster where `pkg/db/cnpg/migrations/_observability_rollups.up.sql` has been applied +- **WHEN** an operator queries `timescaledb_information.continuous_aggregates` (or `pg_matviews`) +- **THEN** continuous aggregates exist for `logs` severity counts, `otel_metrics` KPIs, and trace-like KPIs derived from `otel_traces` root spans. + +### Requirement: Rollups include refresh policies and recovery guidance +The system SHALL attach refresh policies for each observability rollup and document verification and recovery steps for operators. + +#### Scenario: Refresh jobs exist and report healthy state +- **GIVEN** the observability rollups are installed +- **WHEN** an operator queries `timescaledb_information.jobs` and `timescaledb_information.job_errors` +- **THEN** each rollup has a refresh job configured and no recent refresh failures are reported. + diff --git a/openspec/changes/add-observability-timescale-rollups/tasks.md b/openspec/changes/add-observability-timescale-rollups/tasks.md new file mode 100644 index 000000000..ad8c86209 --- /dev/null +++ b/openspec/changes/add-observability-timescale-rollups/tasks.md @@ -0,0 +1,28 @@ +# Tasks + +## 1. Schema / Migrations (CNPG) +- [ ] Confirm the next migration number under `pkg/db/cnpg/migrations/` (currently only `00000000000001_schema.up.sql` exists, so the next is expected to be `00000000000002`). +- [ ] Add an idempotent CNPG migration `pkg/db/cnpg/migrations/_observability_rollups.up.sql` that: + - [ ] Creates a 5-minute CAGG for `otel_metrics` KPIs (total/errors/slow/avg/p95). 
+ - [ ] Creates a 5-minute CAGG for trace-like KPIs based on `otel_traces` root spans (total/errors/avg/p95). + - [ ] Creates a 5-minute CAGG for `logs` severity counts. + - [ ] Adds continuous aggregate refresh policies for each CAGG. + - [ ] (Optional) Adds retention policies for the CAGGs (match base retention window when enabled). +- [ ] Add a down migration if the project requires down migrations for CNPG (otherwise document no-op behavior). + +## 2. Documentation +- [ ] Add an operator runbook describing: + - [ ] How to verify rollups exist (`\d+` / `pg_matviews`) + - [ ] How to verify refresh policies (`timescaledb_information.jobs`) + - [ ] How to inspect failures (`timescaledb_information.job_errors`) + - [ ] Manual refresh commands for backfills (`CALL refresh_continuous_aggregate(...)`) + +## 3. Verification +- [ ] Validate that rollup totals match raw-table totals for a fixed time window (ex: last 1h / last 24h), within expected ingestion lateness. +- [ ] Validate dashboard latency improvements by comparing: + - [ ] Raw SRQL stats queries + - [ ] Rollup-backed queries (sum buckets in window) + +## 4. Follow-ups (Not part of this change) +- [ ] Update `web-ng` Observability + Analytics KPI cards to query rollups when available, with a safe fallback to raw hypertables. +- [ ] If needed, evaluate `timescaledb_toolkit` for cheaper p95 percentiles at scale. diff --git a/openspec/changes/add-prometheus-monitoring-bridge/proposal.md b/openspec/changes/add-prometheus-monitoring-bridge/proposal.md deleted file mode 100644 index 03598b141..000000000 --- a/openspec/changes/add-prometheus-monitoring-bridge/proposal.md +++ /dev/null @@ -1,14 +0,0 @@ -# Change: Add Prometheus Monitoring Bridge for ServiceRadar - -## Why -Operators run kube-prometheus-stack in the `monitoring` namespace and want to scrape ServiceRadar's internal OTEL metrics (identity reconciliation, pollers, sync, collectors) without replacing our built-in lightweight OTEL pipeline. We need first-class Prometheus/ Grafana surfaces so teams can reuse their standard monitoring while keeping the edge-friendly OTEL path for on-site deployments. - -## What Changes -- Expose ServiceRadar metrics to Prometheus (ServiceMonitors/PodMonitors and scrapeable `/metrics` endpoints) across core, poller, sync, otel-collector, and registry identity metrics. -- Add dual-telemetry support so OTEL exporters can fan out to both our internal collector and external Prometheus/remote-write targets without losing current behavior. -- Ship Grafana dashboards (identity reconciliation, ingestion/poller throughput, OTEL collector health) consumable by kube-prom-stack. -- Provide Helm/kustomize wiring so monitoring artifacts live in the `monitoring` namespace with labels compatible with kube-prom-stack discovery. - -## Impact -- Affected specs: `observability-integration` -- Affected code: `pkg/registry/identity_metrics.go`, `pkg/logger/otel.go`, OTEL collector config (`k8s/demo/base/serviceradar-otel.yaml`, Helm charts), Service/ServiceMonitor manifests, Grafana dashboards assets. 
diff --git a/openspec/changes/add-prometheus-monitoring-bridge/specs/monitoring-bridge/spec.md b/openspec/changes/add-prometheus-monitoring-bridge/specs/monitoring-bridge/spec.md deleted file mode 100644 index dceeafd8d..000000000 --- a/openspec/changes/add-prometheus-monitoring-bridge/specs/monitoring-bridge/spec.md +++ /dev/null @@ -1,28 +0,0 @@ -## ADDED Requirements -### Requirement: Prometheus Pull Endpoint -The system SHALL expose OTEL metrics via a Prometheus pull endpoint (`/metrics`) on core, poller, sync, and faker with configurable enablement, bind address, and port per service. - -#### Scenario: Metrics scrape enabled -- **WHEN** metrics are enabled for a service -- **THEN** the service listens on the configured address/port and serves Prometheus format at `/metrics` - -### Requirement: Helm Scrape Configuration -The deployment SHALL include Helm values and templates to enable Prometheus scraping (annotations or ServiceMonitor) with configurable namespace/selector, scrape interval, TLS/mtls options, and per-component toggles. - -#### Scenario: Enable scraping in demo cluster -- **WHEN** Helm values set `core.metrics.prometheus.enabled=true` -- **THEN** the rendered manifests expose the metrics port/path and include scrape configuration for Prometheus in the target namespace - -### Requirement: Metrics Coverage and Stability -The system SHALL expose key identity metrics (promotion, drift, availability) and core service health metrics in the Prometheus endpoint, and SHALL document metric names/descriptions so alerts remain stable. - -#### Scenario: Identity drift metrics visible -- **WHEN** Prometheus scrapes the core metrics endpoint with identity reconciliation enabled -- **THEN** it can read `identity_cardinality_current`, `identity_cardinality_baseline`, `identity_cardinality_drift_percent`, and `identity_cardinality_blocked` - -### Requirement: Alerting Guidance -The system SHALL provide alert templates/runbooks for Prometheus rules covering identity drift, promotion failures, and metrics scrape health, including recommended thresholds for demo/prod. - -#### Scenario: Alert template referenced -- **WHEN** an operator follows the Prometheus integration docs -- **THEN** they have sample alert rules (e.g., drift over baseline, promotion blocked) and runbook steps to respond diff --git a/openspec/changes/add-prometheus-monitoring-bridge/specs/observability-integration/spec.md b/openspec/changes/add-prometheus-monitoring-bridge/specs/observability-integration/spec.md deleted file mode 100644 index 8679970db..000000000 --- a/openspec/changes/add-prometheus-monitoring-bridge/specs/observability-integration/spec.md +++ /dev/null @@ -1,35 +0,0 @@ -## ADDED Requirements -### Requirement: Prometheus Scrape Surfaces for Core Services -ServiceRadar SHALL expose Prometheus-compatible metrics endpoints for core, registry (identity), poller, sync, and OTEL collector components, addressable from the `monitoring` namespace without bypassing existing auth/TLS controls. 
- -#### Scenario: Monitoring namespace scrapes registry metrics -- **WHEN** kube-prom-stack in namespace `monitoring` discovers ServiceMonitor/PodMonitor objects for `serviceradar-registry` -- **THEN** it can scrape a stable `/metrics` endpoint that includes identity_* metric families without disabling SPIFFE/TLS if enabled - -### Requirement: Dual Telemetry Destinations -ServiceRadar telemetry (logs/metrics/traces) SHALL support simultaneous delivery to the built-in OTEL collector and to external Prometheus/remote-write targets via configuration without changing default edge-friendly behavior. - -#### Scenario: Enable external Prometheus alongside internal OTEL -- **WHEN** an operator sets configuration to add a Prometheus/remote-write exporter while keeping the default OTLP exporter enabled -- **THEN** metrics continue flowing to the internal OTEL collector AND are exported to the external Prometheus target without duplicate instrumentation or process restarts beyond config reload - -### Requirement: kube-prom-stack Integration Artifacts -The repository SHALL ship ServiceMonitor/PodMonitor manifests and Helm values that place monitoring resources in the `monitoring` namespace with label selectors compatible with kube-prom-stack discovery. - -#### Scenario: Helm install with monitoring enabled -- **WHEN** Helm values set `monitoring.enabled=true` -- **THEN** the rendered manifests include ServiceMonitor/PodMonitor objects in namespace `monitoring` targeting ServiceRadar components’ metrics ports with appropriate scrape intervals and TLS/auth settings - -### Requirement: Grafana Dashboards for ServiceRadar Metrics -Grafana dashboards SHALL be provided that visualize identity reconciliation metrics, poller/sync throughput, OTEL collector health, and key availability indicators, and they SHALL be importable by kube-prom-stack. - -#### Scenario: Dashboard import -- **WHEN** an operator imports the shipped JSON dashboards into Grafana -- **THEN** panels render identity_* counters/gauges, scrape success rates, and collector/exporter status using the Prometheus data source without manual query edits - -### Requirement: Alerting Rules in Monitoring Namespace -PrometheusRule resources for ServiceRadar SHALL be installable in the `monitoring` namespace, covering identity reconciliation health and scrape/latency regressions, with labels matching kube-prom-stack alertmanager routing. - -#### Scenario: Identity reconciliation alert fires -- **WHEN** `identity_promotion_run_age_ms` exceeds the configured threshold or `identity_promotions_blocked_policy_last_batch` stays >0 for the alert window -- **THEN** the PrometheusRule in `monitoring` raises an alert with labels suitable for Alertmanager routing (severity, service) diff --git a/openspec/changes/add-prometheus-monitoring-bridge/tasks.md b/openspec/changes/add-prometheus-monitoring-bridge/tasks.md deleted file mode 100644 index 302636eb3..000000000 --- a/openspec/changes/add-prometheus-monitoring-bridge/tasks.md +++ /dev/null @@ -1,20 +0,0 @@ -## 1. Discovery -- [ ] 1.1 Inventory existing metrics/ports (core, registry identity, poller, sync, otel collector) and confirm scrape paths/labels. -- [ ] 1.2 Map kube-prom-stack expectations (namespace `monitoring`, label selectors, ServiceMonitor/PodMonitor defaults, RBAC). - -## 2. Prometheus Surfacing -- [ ] 2.1 Expose `/metrics` or OTEL-prom exporter on core/registry/poller/sync; document port/label conventions. 
-- [ ] 2.2 Add ServiceMonitor/PodMonitor resources (and namespace/label wiring) for demo + Helm chart values. -- [ ] 2.3 Ensure TLS/mTLS story (SPIFFE or http) and authentication alignment with monitoring stack. - -## 3. Dual Telemetry Outputs -- [ ] 3.1 Extend OTEL logger/metric config to support multiple exporters (existing OTLP + Prometheus/remote write) without breaking defaults. -- [ ] 3.2 Provide config samples/values for enabling external Prometheus while keeping edge-friendly OTEL path. - -## 4. Dashboards & Alerts -- [ ] 4.1 Add Grafana dashboards for identity reconciliation, ingest/poller health, and OTEL collector status, packaged for kube-prom-stack import. -- [ ] 4.2 Wire alerting rules (e.g., identity rules) into monitoring namespace with labels matching kube-prom-stack. - -## 5. Validation -- [ ] 5.1 Validate scrape success in demo cluster (`monitoring` namespace) and verify metrics families (identity_* etc.). -- [ ] 5.2 Run openspec validate for change and update tasks to completed after implementation. diff --git a/openspec/changes/add-serviceradar-web-ng-foundation/design.md b/openspec/changes/add-serviceradar-web-ng-foundation/design.md new file mode 100644 index 000000000..441b30997 --- /dev/null +++ b/openspec/changes/add-serviceradar-web-ng-foundation/design.md @@ -0,0 +1,208 @@ +# Design: `serviceradar-web-ng` functionality map + +## Philosophy: "The Core is a Daemon" +The Go application (`serviceradar-core`) is treated strictly as a **Data Ingestion Engine**. It scans networks, receives SNMP traps/Netflow, and writes to Postgres. It does **not** serve user traffic. + +Phoenix (`serviceradar-web-ng`) reads that ingestion data and serves the UI/API using its own isolated application state. + +## 1. Logic Porting Map (Go -> Elixir) +The following domains must be re-implemented. Note that Auth is a *replacement*, not a port. + +| Domain | Legacy Go Source | Elixir Context (`ServiceRadarWebNG.*`) | Responsibility | +| :--- | :--- | :--- | :--- | +| **Inventory** | `unified_devices.go` | `Inventory` | Listing devices, Editing metadata (Shared Data). | +| **Topology** | `cnpg_discovery.go` | `Topology` | Querying the Apache AGE graph (Shared Data). | +| **Edge** | `edge_onboarding.go` | `Edge` | CRUD for onboarding packages (Shared Data). | +| **Infra** | `pollers.go` | `Infrastructure` | Viewing Poller status/health (Shared Data). | +| **Auth** | `auth.go` | `Accounts` | **NEW:** Fresh user management table (`ng_users`). | + +## 2. Authentication Strategy (Fresh Start) +- **Table:** Create a new table `ng_users` (or similar) via `phx.gen.auth`. +- **Isolation:** Do NOT touch the legacy `public.users` table used by Go. This prevents collision and allows the Go daemon to continue internal operations if it relies on that table (though it shouldn't for ingestion). +- **Setup:** The first user to access the new UI will register a fresh admin account. + +## 3. The API Gateway (Internal) +Phoenix will mount a `ServiceRadarWebNG.Api` scope. +- `POST /api/query` -> `QueryController.execute` (Calls Rust NIF for translation, then executes SQL via Ecto). +- All other legacy endpoints are deprecated and replaced by LiveView or new JSON endpoints as needed. + +## 4. Embedded SRQL Translation (Rustler) +We are transforming `srql` from a **standalone HTTP service** to an **embedded translator library**. +- **No removal:** The `rust/srql` crate stays in the repo and continues to be maintained. 
+- **Additive migration:** The translator-only API is added without breaking existing SRQL server behavior so the legacy stack can keep using the SRQL HTTP service during the cutover. +- **Refactor:** Ensure `rust/srql` exposes a public translation API that returns: + - parameterized SQL (`$1`, `$2`, ...) + - bind parameters (in order) + - pagination metadata (next/prev cursor, limit) +- **NIF:** Phoenix calls into SRQL via Rustler for translation only (pure computation). +- **Execution:** Phoenix executes the SQL via `Ecto.Adapters.SQL.query/4` (or equivalent) using the existing `ServiceRadarWebNG.Repo` pool. +- **Flow:** `UI/API -> Phoenix -> Rustler (translate) -> SQL -> Repo (execute) -> JSON`. +- **Deployment change:** The standalone `srql` HTTP container is no longer needed—requests are served by Phoenix. + +## 5. Schema Ownership vs Data Access +- **Schema ownership (DDL):** Go Core owns the table structure for `unified_devices`, `pollers`, metrics, etc. Phoenix does NOT generate migrations for these. +- **Data access (DML):** Phoenix CAN read AND write to shared tables (e.g., editing device names, updating policies). +- **Phoenix-owned tables:** `ng_users`, `ng_sessions`, etc. Phoenix owns both schema and data. + +## 6. Deployment +- The `web-ng` container will be deployed alongside `core`. +- `core` writes to DB (Telemetry/Inventory ingestion). +- `web-ng` reads/writes DB (Inventory edits, Auth, Settings). +- External Load Balancer routes traffic to `web-ng`. + +## 7. Property-Based Testing (StreamData) + +### Goals +- Catch edge-cases early for new Elixir contexts (especially token formats, parsing/validation, and serialization). +- Keep most property tests pure (no DB) so they run fast and deterministically in CI. +- Add targeted “safety net” properties at boundary layers (NIF input, JSON decoding/validation, changesets) to ensure garbage input never crashes the BEAM or the request process. + +### Framework Choice +- Use `StreamData` via `ExUnitProperties` (standard Elixir ecosystem property-based testing). + +### Conventions +- Put generators in `test/support/generators/*.ex` (shared across properties). +- Put properties in `test/property/**/*_property_test.exs`. +- Tag long-running properties with `@tag :slow_property` and keep CI defaults bounded (e.g., 50–200 cases/property) with an env override for deeper runs. +- Property tests MUST be part of normal `mix test` execution (no separate “optional” suite), unless explicitly excluded for performance reasons with a documented CI job. + +### Starter Properties (Examples) +- **NIF Safety Net:** For any printable string input, `ServiceRadarWebNG.SRQL.query/1` MUST return `{:ok, _}` or `{:error, _}` and MUST NOT crash the test process. +- **Dual-Write Consistency:** For any sequence of create/update/delete operations on a Device, the Postgres row state MUST match the AGE node state after each operation. +- **Changeset Fuzzing:** For generated JSON-like maps that mimic Go ingestion shapes, changesets MUST return `{:ok, _}` or `{:error, changeset}` and MUST NOT raise. + +## 8. SRQL-First Analytics UI and Composable Dashboards + +### Principle: "Query-first UI" +For read-oriented pages (devices, pollers, metrics, traces, events, dashboards), SRQL is the primary interface: +- Pages declare a default SRQL query. +- The page executes that SRQL query via `POST /api/query`. +- The global navigation displays the exact SRQL query used to render the view. +- Users can edit and re-run the SRQL query to drive the page. 
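+
+A rough sketch of this flow follows. The module name, the `translate/1` return shape, and the default query below are illustrative assumptions; only the translate-then-execute split and `ServiceRadarWebNG.Repo` come from this design:
+
+```elixir
+# Sketch only: translate SRQL via the embedded NIF, execute through the
+# existing Repo pool, and keep the active query deep-linkable in the URL.
+defmodule ServiceRadarWebNGWeb.DevicesLive do
+  use ServiceRadarWebNGWeb, :live_view
+
+  @default_query "show devices"  # hypothetical page default
+
+  def handle_params(params, _uri, socket) do
+    srql = Map.get(params, "q", @default_query)
+
+    socket =
+      case ServiceRadarWebNG.SRQL.translate(srql) do
+        # Assumed translator return shape: parameterized SQL plus ordered binds.
+        {:ok, %{sql: sql, params: binds}} ->
+          {:ok, result} = Ecto.Adapters.SQL.query(ServiceRadarWebNG.Repo, sql, binds)
+          assign(socket, query: srql, columns: result.columns, rows: result.rows, error: nil)
+
+        {:error, reason} ->
+          # Invalid SRQL surfaces as an error panel; the LiveView never crashes.
+          assign(socket, query: srql, columns: [], rows: [], error: inspect(reason))
+      end
+
+    {:noreply, socket}
+  end
+
+  # The query bar re-runs the page by patching the URL so results stay shareable.
+  def handle_event("run_query", %{"q" => q}, socket) do
+    {:noreply, push_patch(socket, to: "/devices?q=" <> URI.encode_www_form(q))}
+  end
+
+  def render(assigns) do
+    ~H"""
+    <div :if={@error} class="alert alert-error"><%= @error %></div>
+    <div :if={is_nil(@error)}><%= length(@rows) %> rows for <code><%= @query %></code></div>
+    """
+  end
+end
+```
+
+The shared SRQL page helpers described below would centralize this kind of state handling so individual LiveViews stay thin.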
+ +Non-analytics flows (auth, settings, edge onboarding CRUD) can remain context/API driven and use Ecto directly. + +### Query Bar Contract +The app provides a shared "Query Bar" in the top navigation for SRQL-driven pages: +- Shows the active SRQL query string for the current page. +- Allows editing and submission (re-runs query and updates page state). +- Provides bounded error handling (invalid query shows an error panel, never crashes LiveView). +- Supports deep-linking by encoding the query in the URL (for shareable dashboards/pages). +- Provides a query builder toggle (icon) that expands a builder panel under the query bar. + +### Query Builder +The query builder is a UI for constructing SRQL safely: +- The SRQL text input remains the source of truth (execution always runs SRQL). +- The builder produces SRQL output by updating the query bar text. +- The builder attempts to parse/reflect the existing SRQL into builder state when possible. +- If SRQL cannot be represented, the builder shows a bounded "read-only/limited" state and avoids destructive rewrites. +- The builder UI uses a visual, node/pill style with connector affordances (instead of a generic stacked form) and supports multiple filters. +- The builder SHOULD be driven by a centralized catalog of entities and field options so adding a new SRQL collection is a data/config change, not a template rewrite. + +### SRQL Page Helpers +SRQL-driven pages share common patterns (query state, builder state, deep-linking, and execution). These are centralized in a helper module: +- Pages initialize SRQL state (default query + builder state). +- `handle_params/3` resolves `?q=` overrides and executes SRQL via the embedded translator + Ecto. +- Common `handle_event/3` handlers manage query editing, submit, builder toggle, and builder edits. + +## 9. UI Components and Navigation Layout + +### Component Organization +To keep the UI modular and swappable (Tailwind/daisyUI vs future alternatives), we centralize primitives as Phoenix function components: +- `UIComponents`: reusable primitives (buttons, inputs, panels, badges, tabs, dropdowns, toolbars) +- `SRQLComponents`: SRQL-specific composites (query bar, builder, results table, auto-viz panels) + +Feature LiveViews should prefer calling these components instead of hardcoding CSS classes in templates. + +### Navigation Layout +To avoid a cluttered top bar as more SRQL analytics pages are added: +- The SRQL query bar remains in the top header (always visible and consistent across SRQL-driven pages). +- Authenticated navigation is moved into a left sidebar (responsive drawer on mobile). + +## 10. Future SRQL Composition (Open Question) +Users will likely want to enrich a primary query with related collections (e.g., "devices matching X" plus "events for those devices"). +This suggests a future SRQL DSL extension for composing/expanding queries across entities (e.g., subquery piping or relationship expansion), which should be proposed explicitly as a follow-on change. + +### Composable Dashboard Engine +Dashboards are built around SRQL queries and render "widgets" based on the query outputs: +- A dashboard definition can contain one or more SRQL queries. +- Each query result is mapped to one or more visualization candidates. +- The user can select a visualization, and the dashboard composes the widgets. 
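+
+A minimal sketch of that composition (module names, struct fields, and the `run_srql` callback are illustrative assumptions):
+
+```elixir
+# Sketch only: a dashboard is a named list of SRQL-backed widgets; composing it
+# means running each widget's query and pairing the result with the chosen viz.
+defmodule ServiceRadarWebNG.Dashboards.Definition do
+  defstruct name: nil, widgets: []
+
+  defmodule Widget do
+    # `viz` is whichever candidate the user selected (ex: :timeseries, :table).
+    defstruct title: nil, srql: nil, viz: :table
+  end
+end
+
+defmodule ServiceRadarWebNG.Dashboards.Composer do
+  alias ServiceRadarWebNG.Dashboards.Definition
+
+  @doc "Run every widget query through `run_srql` (translate + execute) and collect panels."
+  def compose(%Definition{widgets: widgets}, run_srql) when is_function(run_srql, 1) do
+    Enum.map(widgets, fn widget ->
+      case run_srql.(widget.srql) do
+        {:ok, result} -> %{widget: widget, viz: widget.viz, result: result}
+        {:error, reason} -> %{widget: widget, viz: :error_panel, error: reason}
+      end
+    end)
+  end
+end
+```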
+ +### Result Shape Detection and Visualization Hints +The dashboard engine should prefer explicit metadata from SRQL translation/execution over heuristics: +- Column names and types (time, numeric, categorical, id-like) +- Semantic hints (unit, series key, suggested visualization types) +- Pagination and time window semantics for hypertables + +If explicit hints are unavailable, the engine can fall back to conservative heuristics (e.g., if a "time" column exists, suggest a time series chart). + +### TimescaleDB and Apache AGE Coverage +Composable dashboards must support both: +- TimescaleDB hypertable patterns (time windows, aggregation/downsampling) +- Apache AGE relationship exploration (device/asset/interface graphs) + +Preferred approach: keep SRQL as the unifying interface by extending the SRQL DSL/translator to express graph-oriented queries (compiled into SQL that uses AGE `cypher()`), so dashboards can treat graph data as another SRQL-backed data source. + +### Extensibility +The dashboard system must be easy to extend: +- Provide stable Elixir behaviours for new widgets/visualizations. +- Keep visualizations pure where possible (inputs: SRQL string + result set + metadata; output: a LiveComponent render). +- Make it straightforward to add a new visualization without modifying core dashboard code (registry/discovery pattern). + +## 11. Legacy UI Parity Map (`web/` -> `web-ng/`) + +The legacy UI in `web/` contains several top-level destinations that users expect. This table is the “porting backlog” for `web-ng/`. + +| Legacy Route (`web/`) | Phoenix Route (`web-ng/`) | Status | Notes | +| :--- | :--- | :--- | :--- | +| `/dashboard` | `/dashboard` | ✅ Exists | SRQL-driven dashboard engine with plugins (timeseries, categories, topology, table). | +| `/analytics` | `/analytics` | ✅ Exists | Operator overview hub (KPIs, charts, severity summaries, drill-down). | +| `/devices` | `/devices` | ✅ Exists | Inventory table includes Online/Offline + bulk ICMP sparkline health column. | +| `/devices/:id` | `/devices/:device_id` | ✅ Exists | SRQL-driven details page with metric charts (cpu/memory/disk). | +| `/events` | `/events` | ✅ Exists | SRQL list page. | +| `/logs` | `/logs` | ✅ Exists | SRQL list page. | +| `/services` | `/services` | ✅ Exists | SRQL list page. | +| `/interfaces` | `/interfaces` | ✅ Exists | SRQL list page. | +| `/metrics` | (new) `/metrics` | ❌ Missing | Legacy “system metrics” views; can be recreated via SRQL metrics entities + charts. | +| `/network` | (new) `/network` | ❌ Missing | Network discovery, sweeps, SNMP summaries (likely mixes SRQL tables + purpose-built dashboards). | +| `/observability` | (new) `/observability` | ❌ Missing | Logs/traces/metrics tabs; SRQL can cover read views, but may need richer UI patterns. | +| `/identity` | (new) `/identity` | ❌ Missing | Identity reconciliation UI; likely needs non-trivial workflow UIs beyond SRQL tables. | +| `/admin/*` | (new) `/admin/*` | ❌ Missing | Edge onboarding packages UI and other admin tools have not been ported. | + +## 12. UI Component Strategy (Tailwind + daisyUI + MCP) + +- `web-ng/` uses Tailwind + daisyUI for styling. +- All reusable primitives MUST live in `ServiceRadarWebNGWeb.UIComponents` (and SRQL composites in `ServiceRadarWebNGWeb.SRQLComponents`) so feature LiveViews do not hand-roll class soup. 
+- When introducing new UI components, prefer daisyUI component patterns (cards, stats, badges, tables, dropdowns, tooltips) and derive their markup from the daisyUI snippet catalog (via the daisy MCP server) before custom-building. + +## 13. Charting & Visualization Strategy + +### Principles +- Prefer server-rendered SVG for small “micro charts” (sparklines) to keep LiveView fast and dependency-light. +- Use the existing dashboard plugin system for “real” charts on `/dashboard` and the planned `/analytics` hub. +- Keep visualizations bounded: cap points/series and degrade gracefully to tables when results are not chartable. + +### Planned Additions +- Add an `/analytics` LiveView implemented as a curated dashboard definition (multiple SRQL queries -> multiple panels). +- Extend visualization support where needed: + - “KPI/Stat” panels (single-value outputs). + - “Donut/Pie” or “Stacked” availability chart (optional; can start as categories bars). + +## 14. ICMP Sparkline in Device Inventory (Data + Performance) + +### Data source +ICMP latency is ingested into the database and is queryable via SRQL (typically through `timeseries_metrics` where `metric_type = "icmp"`). + +### Query strategy (avoid N+1) +- The device list page MUST NOT fetch metrics per row. +- Fetch ICMP sparkline data in a single bulk query scoped to the current page’s device IDs and a fixed time window (e.g., last 1h). +- Downsample on the server (TimescaleDB `time_bucket`) to a small, fixed point count suitable for an inline sparkline (e.g., <= 20 points per device). + +### Rendering strategy +- Render each sparkline as SVG (polyline/area) with a color derived from the latest latency bucket (e.g., green/yellow/red thresholds). +- Tooltip can be implemented with a native `<title>` element or daisyUI tooltip patterns, but must not require per-point client JS. + +### Guardrails +- Cap device count (page size) and point count to prevent heavy queries. +- Use bounded refresh semantics (e.g., manual refresh or a conservative interval), and ensure empty/no-data cases render cleanly. diff --git a/openspec/changes/add-serviceradar-web-ng-foundation/proposal.md b/openspec/changes/add-serviceradar-web-ng-foundation/proposal.md new file mode 100644 index 000000000..fe41052b7 --- /dev/null +++ b/openspec/changes/add-serviceradar-web-ng-foundation/proposal.md @@ -0,0 +1,54 @@ +# Change: Add `serviceradar-web-ng` (The New Monolith) + +## Why +- **Complete Replacement:** We are replacing the existing React/Next.js frontend and the Go HTTP API entirely. The new Phoenix application will be the sole interface for users and API clients. +- **Architecture Shift:** Moving from Microservices (Kong + Go API + Rust Service + Next.js) to a Monolith (Phoenix + Embedded Rust). +- **Core Isolation:** `serviceradar-core` (Go) is being demoted to a background ingestion daemon. It will continue to write to the DB, but its HTTP endpoints will be bypassed and eventually ignored. +- **Simplification:** We are removing the need for the Kong API Gateway and the standalone SRQL HTTP service container. SRQL itself remains—it becomes an embedded library called via Rustler NIF. + +## What Changes +- **New App:** Create `web-ng/` hosting `:serviceradar_web_ng`. +- **SRQL (Translator-Only):** Embed `rust/srql` via Rustler (NIF) as a *pure translator* that converts SRQL -> parameterized SQL (+ bind params + pagination metadata). Phoenix executes the SQL via Ecto/Postgrex using the existing `Repo` connection pool. + - **Compatibility:** Translator-only mode MUST be additive.
The existing SRQL HTTP service (and its query-execution behavior) remains supported during the migration. +- **SRQL-First Analytics:** All read-oriented analytics views (devices, pollers, metrics, traces, events, dashboards) are driven by SRQL queries executed via `POST /api/query` (translate in Rust, execute in Phoenix via Ecto). + - **Query Visibility:** SRQL-driven pages MUST display the active SRQL query in a top navigation input (editable to re-run). + - **Composable Dashboards:** Dashboards are built from one or more SRQL queries and auto-generate visualizations when the result shape is recognized; the visualization system MUST be modular and extensible. +- **UI Parity & Improvements (Legacy `web/` -> `web-ng/`):** + - Recreate the missing “Analytics” hub (summary KPIs + charts) using SRQL-driven queries and dashboard visualizations. + - The Analytics hub SHOULD serve as an operator overview (quick health check) with drill-down navigation into detail pages. + - Enhance the Device Inventory table to include health signals (online/offline + ICMP sparkline latency) without per-row N+1 queries. + - Standardize UI primitives using Tailwind + daisyUI, with components expressed via Phoenix function components. +- **Database:** Connect Ecto to the existing Postgres/AGE instance. + - *Telemetry Data:* Mapped to existing tables (Read-Only). + - *App Data:* Fresh tables created/managed by Phoenix (Read/Write). +- **Auth:** **Fresh Start.** Implement standard `phx.gen.auth` using a new table (e.g., `ng_users`). We will **not** use the legacy `users` table or migrate old credentials. +- **Logic Porting:** Re-implement user-facing business logic from Go into Elixir Contexts. +- **Testing:** Establish property-based testing patterns early using `StreamData` (`ExUnitProperties`) for core invariants (token formats, parsing/validation, and “never crash” boundaries like NIF input handling). + +## Non-Goals +- **No Go Changes:** We will not modify `serviceradar-core` source code. +- **No Auth Migration:** Legacy user accounts are abandoned. Users will register fresh accounts in the new system. +- **No API Compatibility:** The new API will follow Phoenix conventions, not strictly mimic the legacy Go API structure. +- **No SRQL DB Runtime in NIF:** SRQL MUST NOT open database connections, manage a separate pool, or require a Tokio runtime inside the Phoenix process. Query execution is handled by Phoenix via Ecto. +- **No SRQL Writes:** SRQL is the query engine for reads/analytics, not the primary mechanism for writes or stateful workflows (auth/settings/edge onboarding remain API + Ecto driven). +- **No Kubernetes Cutover Yet:** We will iterate in the local Docker Compose stack before making any k8s routing/cutover changes. + +## Impact +- **Routing:** Local Docker Compose Nginx routes `/*` and `/api/*` to Phoenix for iteration; Kubernetes cutover is deferred. +- **Security:** Phoenix becomes the sole Authority for Identity. +- **Performance:** Elimination of internal HTTP hops for Query and API responses. + - Translator-only SRQL reduces overhead (no extra DB pool/runtime in NIF) and consolidates DB access under Ecto. + +## Status +- In progress (local compose iteration; k8s cutover deferred). +- SRQL translator-only pivot complete: Rust now translates SRQL -> parameterized SQL + typed bind params + pagination metadata; Phoenix executes via Ecto using `ServiceRadarWebNG.Repo`. +- Added safety checks: unit tests and debug-mode bind-count validation to ensure SQL placeholder arity matches returned params. 
+- SRQL-first analytics UX implemented (query bar + builder + SRQL-driven list pages). +- Dashboard renders charts/graphs when SRQL metadata supports it (Timeseries/Topology plugins), plus table fallback. +- Device details page exists at `/devices/:device_id` (SRQL-driven), including related CPU/memory/disk metric charts. +- Added `/analytics` hub as an operator overview (KPI cards, charts, severity summaries, drill-down links). +- Upgraded `/devices` list for at-a-glance health (Online/Offline + bulk ICMP sparkline; no per-row N+1 queries). +- Improved SRQL-driven list UX: table cell formatting (timestamps, URLs, severity badges) and page-level panels/quick filters for Events/Logs/Services. +- Device details charts improved: hide empty metric sections, default CPU to aggregated trend, and render shaded/annotated timeseries charts. +- UI parity gaps remain vs `web/`: `/network`, `/observability`, `/identity`, and `/admin/*` are not yet ported. +- UI remains Tailwind + daisyUI (no Mishka Chelekom adoption at this time). diff --git a/openspec/changes/add-serviceradar-web-ng-foundation/tasks.md b/openspec/changes/add-serviceradar-web-ng-foundation/tasks.md new file mode 100644 index 000000000..4771b4fb3 --- /dev/null +++ b/openspec/changes/add-serviceradar-web-ng-foundation/tasks.md @@ -0,0 +1,220 @@ +## 1. Foundation & Plumbing + +### Application & DB +- [x] 1.1 Scaffold `serviceradar_web_ng` (Phoenix 1.7+, LiveView) in `web-ng/`. +- [x] 1.2 Configure `Ecto` to connect to the existing CNPG/AGE database. + - [x] *Note:* Support remote dev via `CNPG_*` env vars + TLS client certs. + - [x] *Note:* Publish CNPG for workstation access via Compose `CNPG_PUBLIC_BIND`/`CNPG_PUBLIC_PORT` and cert SANs via `CNPG_CERT_EXTRA_IPS`. +- [x] 1.3 Port the Graph Abstraction (`ServiceRadarWebNG.Graph`) from `Guided` to support AGE queries. + - [x] *Note:* Add `mix graph.ready` to validate AGE connectivity. + +### SRQL Engine (Rustler) +- [x] 1.4 Refactor `rust/srql` to expose public library functions. +- [x] 1.5 Implement `native/srql_nif` in Phoenix (Async NIF pattern). +- [x] 1.6 Implement `ServiceRadarWebNG.SRQL` module. +- [x] 1.6a Pivot to **translator-only** SRQL architecture (no DB/runtime in NIF). + - [x] Refactor `rust/srql` to expose a stable translate API (SQL + bind params + pagination metadata). + - [x] Update `native/srql_nif` to export translate-only functions (no Tokio runtime, no DB connections). + - [x] Update Phoenix to execute translated SQL via Ecto using `ServiceRadarWebNG.Repo`. + - [x] Add integration tests to validate SRQL translation + execution from Elixir. + - [x] Add bind/placeholder arity validation in Rust (tests + debug checks). + - [x] Ensure existing SRQL HTTP service behavior remains intact (run existing `rust/srql` tests). +- [x] 1.6b Extend SRQL translate output with visualization metadata (column types, semantic hints) to support composable dashboards. +- [x] 1.6c Extend SRQL to support query patterns required by dashboards: + - [x] TimescaleDB-friendly time windowing and downsampling helpers. + - [x] AGE relationship queries (compiled into SQL using AGE `cypher()`), so graph panels can be SRQL-driven. + +### Property-Based Testing (StreamData) +- [x] 1.7 Add `stream_data` (and `ExUnitProperties`) to the `web-ng` ExUnit suite. + - [x] Add the dependency to `web-ng/mix.exs` (`stream_data`) under `only: :test`. + - [x] Ensure `ExUnitProperties` is available in tests. +- [x] 1.8 Integrate property-based testing into the `web-ng` ExUnit suite. 
+ - [x] Add shared generators under `web-ng/test/support/generators/`. + - [x] Add `web-ng/test/property/` with at least one starter property test. + - [x] Ensure `mix test` runs property tests by default with bounded case-counts and an env override for deeper runs. + +## 2. Authentication (Fresh Implementation) +- [x] 2.1 Run `mix phx.gen.auth Accounts User ng_users`. + - [x] *Note:* Using `ng_users` ensures we do not conflict with the legacy `users` table. +- [x] 2.2 Run migrations to create the new auth tables. + - [x] *Note:* Use a dedicated Ecto migration source table to avoid collisions in shared CNPG (e.g., `ng_schema_migrations`). +- [x] 2.3 Verify registration/login flow works independently of the legacy system. + +## 3. Logic Porting (Shared Data) + +### Inventory & Infrastructure +- [x] 3.1 Create Ecto schemas for `unified_devices`, `pollers`, `services` (no migrations). + - [x] *Note:* Use `@primary_key {:id, :string, autogenerate: false}`. + - [x] *Note:* "No migrations" means Phoenix does not own the table DDL—Go Core does. Phoenix CAN still read/write data to these tables. + - [x] 3.1a Add `unified_devices` schema. + - [x] 3.1b Add `pollers` schema. + - [x] 3.1c Add `services` schema. +- [x] 3.2 Implement `Inventory.list_devices`. +- [x] 3.3 Implement `Infrastructure.list_pollers`. + +### Edge Onboarding +- [x] 3.4 Port `EdgeOnboardingPackage` schema (Shared Data). +- [x] 3.5 Implement token generation logic in Elixir. + - [x] 3.5a Add property tests for token encode/decode invariants (round-trip, URL-safe encoding, and invalid input handling). + +## 4. UI & API Implementation + +### SRQL-First UX (Analytics Pages) +- [x] 4.0 Add a global SRQL query bar in the top navigation for SRQL-driven pages. + - [x] *Note:* It MUST display the exact SRQL query used to render the current view. + - [x] *Note:* It MUST allow editing + re-running the query with bounded errors (no LiveView crashes). + - [x] *Note:* It SHOULD support deep-linking by storing the SRQL query in the URL (shareable links). +- [x] 4.0a Add SRQL-driven page helpers (common LiveView patterns: query state, loading/error states, query execution). +- [x] 4.0b Add property tests to ensure query input handling never crashes (malformed queries, malformed params). +- [x] 4.0c Add an SRQL query builder UI accessible from the query bar (toggle icon + expandable panel). + - [x] 4.0d Keep SRQL text as the source of truth; builder generates SRQL by updating the query bar. + - [x] 4.0e Implement a bounded fallback state when SRQL can't be represented by the builder (no destructive rewrites). + - [x] 4.0f Render the builder panel under the navbar (no navbar height changes). + - [x] 4.0g Support multiple filters in the builder UI (add/remove). + - [x] 4.0h Drive builder entities/fields from a catalog (easy to extend beyond devices/pollers). + +### API Replacement +- [x] 4.1 Create `ServiceRadarWebNG.Api.QueryController` (SRQL endpoint). + - [x] 4.1a Add property tests for request validation/decoding to ensure malformed JSON and random inputs never crash the endpoint. +- [x] 4.1b Update `/api/query` implementation to translate SRQL -> SQL and execute via Ecto (translator-only plan). +- [x] 4.2 Create `ServiceRadarWebNG.Api.DeviceController`. + - [x] 4.2a Include property tests for any new parsing/validation logic introduced by the device API (IDs, filters, and pagination). + +### Dashboard (LiveView) +- [x] 4.3 Re-implement the main Dashboard using LiveView (SRQL-first and composable). 
+ - [x] 4.3a Implement a composable dashboard engine (query-driven widgets, visualization selection). + - [x] 4.3b Implement result-shape detection and visualization inference (prefer SRQL-provided metadata). + - [x] 4.3c Implement a plugin/registry mechanism for adding new visualizations without rewriting the engine. + - [x] 4.3d Implement time series widgets suitable for TimescaleDB hypertables (time-bounded windows, aggregation). + - [x] 4.3e Implement relationship/topology widgets backed by Apache AGE graph queries. + - [x] 4.3f Support deep-linking dashboards from SRQL (store query or dashboard definition in URL). + - [x] 4.3g Add tests for dashboard query execution and bounded error handling. + - [x] 4.3h Add SRQL-first Dashboard LiveView at `/dashboard` (query bar + results table). + - [x] 4.3i Add initial auto-viz inference (heuristics; replace with SRQL metadata in 1.6b/4.3b). +- [x] 4.4 Implement Device List view. + - [x] *Note:* Add authenticated `GET /devices` (initial scaffolding). + - [x] 4.4a Migrate `/devices` to be SRQL-driven and show the active SRQL query in the global query bar. +- [x] 4.5 Implement Poller List view. + - [x] *Note:* Add authenticated `GET /pollers` (initial scaffolding). + - [x] 4.5a Migrate `/pollers` to be SRQL-driven and show the active SRQL query in the global query bar. +- [x] 4.6 Implement Events List view (SRQL-driven). +- [x] 4.7 Implement Logs List view (SRQL-driven). +- [x] 4.8 Implement Services List view (SRQL-driven). +- [x] 4.9 Implement Interfaces List view (SRQL-driven). +- [x] 4.10 Add sidebar navigation layout (move analytics navigation to sidebar; keep SRQL query bar in the top header). + +## 5. Docker Compose Cutover (Local) +- [x] 5.1 Update `docker-compose.yml` to expose `web-ng` on port 80/443. +- [x] 5.2 Remove `kong` container from deployment. +- [x] 5.3 Remove standalone `srql` HTTP service container from deployment (SRQL is now embedded in Phoenix via Rustler). + +## 6. UI Polish & UX (Follow-up) +- [x] 6.1 Move ServiceRadar branding/logo into the left navigation sidebar. +- [x] 6.2 Reduce sidebar width and tighten spacing. +- [x] 6.3 Improve table styling across SRQL-driven pages (readability, hover, truncation). +- [x] 6.3a Format SRQL table cells (dates/URLs/badges) for readability. +- [x] 6.3b Upgrade Events/Logs/Services pages with panels + quick filters. +- [x] 6.4 Ensure Dashboard renders charts/graphs when results support it (not table-only). +- [x] 6.5 Add Device details page (SRQL-driven, with related charts where available). +- [x] 6.5a Hide metric sections when no data is present (no empty charts). +- [x] 6.6 Default Dashboard query to a metrics entity so charts render out-of-the-box. +- [x] 6.7 Add CPU/memory/disk metric chart sections to Device details. +- [x] 6.7a Reduce CPU panel noise (aggregate across cores by default). +- [x] 6.7b Improve timeseries charts (labels + shaded area + min/max/latest). +- [x] 6.8 Add `/analytics` hub page (SRQL-driven, chart-first). + - [x] 6.8a Add sidebar navigation entry for Analytics. + - [x] 6.8b Implement “KPI cards” (total devices, offline devices, high latency, failing services). + - [x] 6.8c Add at least 4 visualization panels (timeseries/categories) with sensible defaults (no empty dashboard). + - [x] 6.8d Add drill-down interactions (click KPI/chart -> navigate with pre-filtered SRQL). +- [x] 6.9 Upgrade `/devices` table for operational at-a-glance. + - [x] 6.9a Add “Health & Metrics” column with Online/Offline + ICMP sparkline latency. 
+ - [x] 6.9b Query ICMP sparkline data in bulk (no per-row N+1 queries) for the current page device IDs. + - [x] 6.9c Ensure bounded performance (downsample, cap points, conservative refresh). + - [x] 6.9d Add tooltip/legend affordances for sparkline thresholds. + +## 7. Kubernetes Cutover (Deferred) +- [ ] 7.1 Add `serviceradar-web-ng` image build/push for k8s deployment. +- [ ] 7.2 Update demo k8s ingress/service routing to point to `web-ng`. +- [ ] 7.3 Remove Kong and SRQL HTTP service from k8s deployment. + +## 8. UI Polish Phase 2 (Dracula Theme & Styling) +- [x] 8.1 Implement Dracula color theme in daisyUI config. + - [x] Update dark theme to use proper Dracula colors (purple, pink, cyan, green, orange). + - [x] Reduced border width to 1px for cleaner look. +- [x] 8.2 Improve timeseries chart styling. + - [x] Update chart colors to Dracula palette (green, cyan, purple, pink, orange, yellow). + - [x] Improve gradient fills with higher opacity for better visibility. +- [x] 8.3 Improve ICMP sparkline in device inventory. + - [x] Add gradient fill under the line (like React version). + - [x] Use SVG path for area fill instead of plain polyline. + - [x] Use Dracula colors for tone-based styling (green for success, orange for warning, red for error). + - [x] Improve line styling with rounded caps/joins. +- [x] 8.4 Improve KPI cards in analytics dashboard. + - [x] Add larger icon boxes with better contrast. + - [x] Add hover scale effect for interactivity. + - [x] Use tone-based coloring for values (warning/error numbers stand out). + - [x] Add uppercase tracking for titles. + +## 9. Known Issues (To Investigate) +- [x] 9.1 SRQL stats query issue - "missing ':' in token" error. + - This error originates from the SRQL Rust parser (srql crate). + - Fixed parser to accept unquoted `stats:count() as total` by consuming `as <alias>` tokens as part of the `stats` expression. + - Implemented in `rust/srql` crate parser with regression tests. +- [x] 9.2 KPI cards showing 0 total devices. + - Fixed: Stats queries now execute correctly after 9.1 fix. + - Service counts now show unique services (by device_id:service_name composite key) instead of raw status check records. +- [x] 9.3 Scalability considerations for 50k+ devices. + - Design principle established: All UI must work at scale (50k to 2mil assets). + - Analytics KPI cards use aggregate queries, not per-device iteration. + - Service counts use bounded queries with unique counting logic. + - Remaining scale considerations tracked in Section 10. + +## 10. UI Polish Phase 3 (Scale-First Design) + +**Design Principle:** All UI must work seamlessly from 1 device to 2 million devices. + +### Recent Fixes (Completed) +- [x] 10.1 Fix critical logs/events widget scrolling in analytics dashboard. + - Changed from `ui_panel` to explicit flex structure with proper overflow handling. +- [x] 10.2 Add hover tooltips to timeseries charts (like React/Recharts version). + - Added `TimeseriesChart` JavaScript hook with mousemove/mouseleave handlers. + - Tooltip shows value and timestamp at cursor position with vertical line indicator. +- [x] 10.3 Make device details overview compact (inline key-value pairs). + - Replaced verbose table layout with horizontal flex wrap. +- [x] 10.4 Fix timeseries chart width (was 1/5 of container, now full-width). + - Changed `preserveAspectRatio` and increased chart width to 800px. +- [x] 10.5 Remove verbose timeseries labels ("Timeseries value over timestamp"). 
+ - Simplified chart headers to show series name + latest value only. +- [x] 10.6 Fix service count showing all status checks instead of unique services. + - Count unique services by `device_id:service_name` composite key. + - Changed labels from "Total Services" to "Active Services (unique)". + +### Events Stream Improvements +- [x] 10.7 Consolidate events table columns (reduce horizontal scroll). + - [x] Show only essential columns: timestamp, severity, source, message summary. + - [x] Hide `event_type` column (not populated in current data). + - [x] Map raw column names to human-readable labels (Time, Severity, Source, Message). +- [x] 10.8 Add Event details page (`/events/:id`). + - [x] Show full event payload (all fields not shown in table). + - [x] Link from table row click to details page. + - [x] JSON syntax highlighting for structured payloads. + +### Logs Stream Improvements +- [x] 10.9 Consolidate logs table columns (reduce horizontal scroll). + - [x] Show only essential columns: timestamp, level, service, message snippet. + - [x] Map raw column names to human-readable labels (Time, Level, Service, Message). +- [x] 10.10 Add Log details page (`/logs/:id`). + - [x] Show full log entry (complete message body, all metadata). + - [x] Link from table row click to details page. + - [x] JSON syntax highlighting for structured log payloads. + - [x] Show trace/span IDs when available. + +### Services Page Improvements +- [x] 10.11 Add visualization to Services availability page. + - [x] Add KPI cards (total checks, available, unavailable) with percentage. + - [x] Add "By Service Type" horizontal bar chart showing availability breakdown. + - [x] Design works at scale: computes stats from bounded page results only. + - [x] Groups by service type with color-coded available/unavailable bars. +- [x] 10.12 Ensure Services page performs at 50k+ services. + - [x] Use pagination with bounded page sizes (default 50, max 200). + - [x] Aggregate counts computed from current page only (not full inventory). diff --git a/openspec/changes/add-srql-api-tests/proposal.md b/openspec/changes/add-srql-api-tests/proposal.md deleted file mode 100644 index 0ab87a7bc..000000000 --- a/openspec/changes/add-srql-api-tests/proposal.md +++ /dev/null @@ -1,18 +0,0 @@ -## Why -- The new Rust-based SRQL translator now owns the `/api/query` surface, but it shipped with only manual QA and a couple of unit tests that stop at the parser boundary. -- Without regression tests that exercise the DSL end-to-end (from HTTP payload → translator → CNPG row set), we cannot confidently evolve operators or refactor the Diesel plans. -- Dashboards and automation lean on SRQL as their query DSL; shipping without automated coverage means we risk silently breaking filters, aggregations, or error codes during routine refactors. - -## What Changes -1. Build a deterministic SRQL API test harness around the Rust service that spins up against a seeded Postgres/CNPG schema so `/api/query` calls can be executed inside `cargo test` (or equivalent Bazel target). -2. Capture canonical DSL scenarios—inventory filters, aggregations, ordering/limits—and assert that `/api/query` responses (rows + metadata) match golden fixtures so any regression fails fast. -3. Add negative-path coverage for bad DSL, malformed JSON, and auth failures to confirm the API returns 400/401 responses instead of panicking, and publish run instructions so CI and local contributors run the suite. -4. 
Layer translator-level unit tests that exercise the SRQL language semantics described in `docs/docs/srql-language-reference.md` (filters, entity selectors, sorting, availability flags, etc.) so example queries like `in:devices time:last_7d sort:last_seen:desc limit:20 is_available:true` are locked down independent of the HTTP harness. -5. Expand `query::tests` and module-level stats tests (e.g., devices, services, interfaces, pollers, logs, cpu_metrics) so every documented example or stats expression has a hermetic regression test, and keep Clippy lint clean (`cargo clippy -p srql`). -6. Provision a long-lived CNPG fixture (TimescaleDB + Apache AGE) inside our Kubernetes environment and expose a read/write test database to BuildBuddy and the GitHub custom runners so Bazel can execute the SRQL API suite remotely without spawning local Docker containers. The harness seeds/cleans this shared fixture before every run so tests stay deterministic. -7. Harden the harness for BuildBuddy/RBE by teaching it to locate fixture assets via Bazel runfiles (`RUNFILES_DIR` / `TEST_SRCDIR`) and by using the admin connection to install the required TimescaleDB and AGE extensions so the runtime SRQL user never needs elevated privileges. - -## Impact -- Introduces a seeded database fixture plus helper utilities for spinning up the SRQL server inside integration tests. The fixture runs centrally in our cluster so Bazel/BuildBuddy and GitHub runners reuse the same Postgres instance but re-seed it per test run. -- Adds new cargo/Bazel test targets that CI must execute (slower test runtime but required for coverage), and requires wiring the shared CNPG credentials into BuildBuddy and the GitHub runners. -- Requires documentation updates (README or docs/docs/agents.md) so contributors know how to run the SRQL API tests locally before submitting PRs. diff --git a/openspec/changes/add-srql-api-tests/specs/srql/spec.md b/openspec/changes/add-srql-api-tests/specs/srql/spec.md deleted file mode 100644 index 7ae5db2d1..000000000 --- a/openspec/changes/add-srql-api-tests/specs/srql/spec.md +++ /dev/null @@ -1,61 +0,0 @@ -## ADDED Requirements - -### Requirement: SRQL `/api/query` tests cover canonical DSL flows -ServiceRadar MUST ship automated tests that exercise the SRQL DSL end-to-end by issuing `/api/query` requests against the Rust translator wired to a deterministic CNPG/Postgres fixture so regressions in parsing, planning, or serialization are caught before release. - -#### Scenario: Canonical device filter succeeds -- **GIVEN** the SRQL test harness loads seeded `devices` + `telemetry_samples` rows into the fixture database -- **WHEN** the integration test submits the DSL statement used by the device inventory dashboard (filters by `site_id`, bucketizes packet loss, orders by `last_seen`) -- **THEN** the `/api/query` response matches the expected JSON rows and metadata stored with the test, proving the parser, planner, and Diesel executor cooperate correctly. - -#### Scenario: Aggregation + pagination preserved -- **GIVEN** the same harness -- **WHEN** a test issues an aggregation query with `GROUP BY` buckets and a `LIMIT/OFFSET` -- **THEN** the translator returns consistent totals/page counts across runs so dashboards relying on those semantics cannot regress silently. - -### Requirement: SRQL API fixtures are accessible from CI runners -The SRQL API suite MUST run in our Kubernetes-backed CI environment (BuildBuddy RBE + GitHub custom runners) without shelling out to a local Docker daemon. 
Tests use a shared CNPG fixture (TimescaleDB + Apache AGE) hosted in the cluster and receive a per-run database that is seeded/reset before executing. - -#### Scenario: BuildBuddy Bazel runs leverage shared CNPG -- **GIVEN** the BuildBuddy executor exposes secrets/environment variables with the CNPG hostname, credentials, and target schema -- **WHEN** `bazel test //rust/srql:srql_api_test` runs remotely -- **THEN** the harness connects to the shared CNPG instance, re-seeds the fixture tables before each case, and finishes without needing Docker or outbound registry access. - -#### Scenario: GitHub custom runners reuse the same fixture -- **GIVEN** our GitHub Actions workflow runs on the self-hosted runners in the cluster and mounts the same CNPG connection secrets -- **WHEN** `cargo test --test api` runs as part of CI -- **THEN** the suite connects to the shared fixture, resets the seed data, and produces deterministic results so CI is consistent regardless of runner location. - -### Requirement: SRQL `/api/query` tests enforce error handling -The automated suite MUST also assert that invalid DSL or unauthorized requests trigger the documented 400/401 responses so error handling does not regress when evolving the translator. - -#### Scenario: Invalid DSL returns 400 -- **GIVEN** a test that submits an SRQL statement referencing an unsupported field/operator -- **WHEN** `/api/query` executes inside the harness -- **THEN** it returns HTTP 400 with a structured error body rather than panicking or falling through to a 500. - -#### Scenario: Missing auth rejected with 401 -- **GIVEN** the test helper omits the Kong API key/SPIFFE headers -- **WHEN** it posts to `/api/query` -- **THEN** the service responds with HTTP 401 and does not attempt to parse or execute the DSL payload. - -### Requirement: SRQL DSL semantics validated by unit tests -Unit tests MUST exercise the SRQL language primitives documented in `docs/docs/srql-language-reference.md` so canonical `in:`, `time`, `sort`, `limit`, and boolean filter combinations keep returning deterministic result sets even if the HTTP harness is not running. - -#### Scenario: Device availability filters stay aligned with docs -- **GIVEN** translator-level unit tests load fixture devices mirroring the documentation example (`docs/docs/srql-language-reference.md:92-95`) -- **WHEN** the tests execute `in:devices time:last_7d sort:last_seen:desc limit:20 is_available:true` and the companion `... is_available:false` -- **THEN** the translator plans both statements into the expected Diesel AST, and the assertions confirm the selected IDs / ordering / row counts, proving availability filters continue matching the DSL semantics. - -#### Scenario: Aggregation example queries emit expected plans -- **GIVEN** translator-level unit tests that mirror the SRQL examples `in:devices discovery_sources:(sweep) discovery_sources:(armis) time:last_7d sort:last_seen:desc`, `in:services service_type:(ssh,sftp) timeFrame:"14 Days" sort:timestamp:desc`, and doc-driven stats queries (`docs/docs/srql-language-reference.md:93-104`) -- **WHEN** the translator parses and plans those DSL statements and the module-level stats helpers (devices, interfaces, pollers, cpu_metrics, logs) build SQL via `to_debug_sql` -- **THEN** the tests assert that filter semantics, order clauses, and stats SQL (e.g., `avg(usage_percent) as avg_cpu by device_id`) match the documented behavior so future parser refactors cannot silently change field mapping, alias propagation, or JSON payload structure. 
- -### Requirement: Comprehensive Entity Coverage -The test suite MUST cover all primary SRQL entities to ensure consistent behavior across the entire DSL surface area. - -#### Scenario: All entities queryable via harness -- **GIVEN** the SRQL test harness with a fully seeded fixture containing data for all entities -- **WHEN** tests issue queries for `pollers`, `services`, `cpu_metrics`, `logs`, and `otel_traces` -- **THEN** the system returns correct results for each entity type, validating that the DSL implementation correctly maps to the underlying CNPG tables for all supported domains. diff --git a/openspec/changes/add-srql-api-tests/tasks.md b/openspec/changes/add-srql-api-tests/tasks.md deleted file mode 100644 index 2d83d7266..000000000 --- a/openspec/changes/add-srql-api-tests/tasks.md +++ /dev/null @@ -1,13 +0,0 @@ -## 1. SRQL API fixture harness -- [x] 1.1 Define the fixture dataset (schema + seed rows) for devices and telemetry tables, and script loading it into the Postgres/CNPG instance the tests will hit. -- [x] 1.2 Add a reusable Rust test helper that boots the SRQL service (HTTP + Diesel pool) against the fixture DB and exposes a function for issuing `/api/query` requests with headers. - -## 2. DSL coverage tests -- [x] 2.1 Implement integration tests that submit canonical DSL statements (filters, aggregations, ordering, pagination) and assert the JSON responses and metadata match golden fixtures. -- [x] 2.2 Add regression tests for invalid DSL/fields plus malformed payloads to ensure the service returns 400 responses with descriptive error bodies instead of panics. -- [x] 2.3 Cover auth + tenant scoping by testing requests that omit/alter the Kong API key (or SPIFFE headers) and asserting a 401 is returned. -- [x] 2.4 Mirror the SRQL language reference examples (e.g., `in:devices time:last_7d sort:last_seen:desc limit:20 is_available:true/false`) as translator-level unit tests so key:value semantics stay validated without spinning up the HTTP harness. *(Added doc-driven unit tests under `rust/srql/src/query/mod.rs`, plus module-level stats tests for `devices`, `interfaces`, `pollers`, `logs`, and `cpu_metrics` so documented flows stay hermetic.)* - -## 3. CI + docs updates -- [ ] 3.1 Wire the new SRQL API tests into `cargo test` / Bazel / CI workflows so PRs cannot merge without passing them. *(GitHub `tests-rust.yml` now runs on ARC-hosted runners and installs GCC/Clang, pkg-config, OpenSSL development headers, and `protoc` via whatever package manager is present (apt/dnf/yum/microdnf) so SRQL + other Rust crates link successfully in both Ubuntu and Oracle Linux images.)* -- [ ] 3.2 Update contributor docs (e.g., `docs/docs/agents.md` or README) with steps for running the SRQL tests locally, including prerequisites for the fixture database. diff --git a/openspec/changes/fix-age-graph-deadlock-handling/proposal.md b/openspec/changes/fix-age-graph-deadlock-handling/proposal.md deleted file mode 100644 index 0dc442cba..000000000 --- a/openspec/changes/fix-age-graph-deadlock-handling/proposal.md +++ /dev/null @@ -1,45 +0,0 @@ -# Change: Fix AGE graph deadlock handling with write serialization - -## Why -Core in the demo namespace is emitting deadlock errors (`deadlock detected` / SQLSTATE 40P01) and entity update failures (`Entity failed to be updated: 3` / SQLSTATE XX000) during AGE graph merges. Investigation of issue #2058 reveals: - -1. 
**Multiple concurrent database connections executing MERGE queries**: With `AGE_GRAPH_WORKERS=4`, up to 4 workers execute MERGE batches simultaneously against the same graph. CNPG logs show 3+ concurrent processes hitting the same timestamp with XX000 errors. - -2. **Failure rate approaching 50%**: Logs show `270 failures vs 304 successes` - nearly half of all graph writes are failing. - -3. **Deadlocks not classified as transient errors**: The `classifyAGEError()` function only treats XX000 and 57014 as transient. Deadlock (40P01) and serialization failure (40001) errors cause immediate batch failure without retry. - -4. **Circular lock dependencies from parallel MERGE**: When two workers update overlapping nodes (e.g., same Collector referenced by multiple devices), they create lock contention: - - Worker A locks Node X, waits for Node Y - - Worker B locks Node Y, waits for Node X - - Result: PostgreSQL aborts one as deadlock victim (40P01) or raises XX000 - -## What Changes - -### 1. Write Serialization with Mutex (Root Cause Fix) -Add a Go-level mutex (`writeMu`) in `ageGraphWriter` to serialize all AGE graph MERGE operations. Multiple workers can still process the queue (for responsiveness), but only one can execute a database query at a time. This eliminates the concurrent write contention that causes deadlocks. - -### 2. Expanded Transient Error Classification -Add SQLSTATE 40P01 (deadlock_detected) and 40001 (serialization_failure) to the list of transient errors that trigger retry with backoff. Includes string fallback patterns for wrapped errors. - -### 3. Improved Backoff Strategy for Deadlocks -Implement separate backoff timing for deadlock errors (500ms base vs 150ms for other errors) with exponential growth and randomized jitter to break lock acquisition synchronization. - -### 4. Deadlock-Specific Metrics -Add new OTel metrics to track deadlock and serialization failure frequency: -- `age_graph_deadlock_total`: Count of deadlock errors encountered -- `age_graph_serialization_failure_total`: Count of serialization failures -- `age_graph_transient_retry_total`: Count of all transient retries - -## Impact -- Affected specs: device-relationship-graph -- Affected code: - - `pkg/registry/age_graph_writer.go` (writeMu, classifyAGEError, backoffDelay) - - `pkg/registry/age_graph_metrics.go` (new metrics) -- Risk: Low - serialization may reduce throughput but eliminates failures -- Performance: Graph writes become sequential but reliable. Queue depth may increase temporarily during bursts but will drain successfully. - -## Trade-offs -- **Throughput vs Reliability**: Serializing writes reduces parallel throughput but ensures ~100% success rate vs ~50% with concurrent writes. -- **Queue Latency**: Individual batches may wait longer in queue, but total time to completion improves due to elimination of failed retries. -- **Worker Count**: Multiple workers still provide value for queue responsiveness even though writes are serialized. 
diff --git a/openspec/changes/fix-age-graph-deadlock-handling/specs/device-relationship-graph/spec.md b/openspec/changes/fix-age-graph-deadlock-handling/specs/device-relationship-graph/spec.md deleted file mode 100644 index 309a4a0fa..000000000 --- a/openspec/changes/fix-age-graph-deadlock-handling/specs/device-relationship-graph/spec.md +++ /dev/null @@ -1,60 +0,0 @@ -## ADDED Requirements - -### Requirement: AGE graph writes are serialized to prevent deadlocks -The system SHALL serialize AGE graph MERGE operations using a mutex so that only one write executes against the graph at any time, eliminating concurrent write contention that causes deadlocks and lock conflicts. - -#### Scenario: Concurrent batches do not deadlock -- **GIVEN** multiple workers processing the AGE graph queue -- **WHEN** two workers attempt to execute MERGE batches simultaneously -- **THEN** the mutex ensures only one executes at a time and the other waits, preventing deadlock. - -#### Scenario: Queue processing remains responsive -- **GIVEN** a burst of graph updates arriving in the queue -- **WHEN** writes are serialized via mutex -- **THEN** multiple workers can still accept work from the queue, only serializing at the database execution point. - -### Requirement: AGE graph writes handle deadlocks with retry -The system SHALL classify PostgreSQL deadlock errors (SQLSTATE 40P01) and serialization failures (SQLSTATE 40001) as transient errors that trigger retry with backoff, so any residual concurrent conflicts do not permanently fail batches. - -#### Scenario: Deadlock triggers retry instead of failure -- **GIVEN** a MERGE batch that encounters a deadlock error -- **WHEN** PostgreSQL returns SQLSTATE 40P01 -- **THEN** the batch retries with exponential backoff and eventually commits without data loss. - -#### Scenario: Serialization failure triggers retry -- **GIVEN** a MERGE batch that encounters a serialization failure -- **WHEN** PostgreSQL returns SQLSTATE 40001 -- **THEN** the batch retries with exponential backoff and eventually commits. - -### Requirement: Deadlock backoff uses longer delays with randomized jitter -The system SHALL use a longer base backoff (500ms) for deadlock and serialization errors compared to other transient errors (150ms), with exponential growth and randomized jitter to break lock acquisition synchronization patterns. - -#### Scenario: Deadlock retries use appropriate backoff -- **GIVEN** a batch that fails with deadlock error -- **WHEN** the batch prepares to retry -- **THEN** the backoff delay starts at 500ms (vs 150ms for other errors) with randomized jitter. - -### Requirement: Deadlock metrics are tracked separately -The system SHALL expose distinct metrics for deadlock and serialization failure occurrences to enable monitoring and alerting on contention-specific issues. - -#### Scenario: Operator monitors deadlock frequency -- **GIVEN** the Prometheus/OTel metrics endpoint is scraped -- **WHEN** the operator queries `age_graph_deadlock_total` -- **THEN** they can see the count of deadlock errors and alert if frequency increases. - -## MODIFIED Requirements - -### Requirement: AGE graph writes tolerate contention with retries and backpressure (MODIFIED) -The system SHALL process AGE graph merges through a serialized, backpressure-aware writer that: -1. Serializes writes via mutex to prevent concurrent MERGE conflicts -2. 
Retries transient AGE errors including: - - SQLSTATE XX000 "Entity failed to be updated" (lock contention) - - SQLSTATE 57014 statement timeout - - SQLSTATE 40P01 deadlock_detected (NEW) - - SQLSTATE 40001 serialization_failure (NEW) - -So overlapping registry/backfill writes do not drop batches due to concurrency conflicts. - -#### Scenario: High-volume writes succeed without deadlocks -- **WHEN** pollers and agents generate bursts of device updates -- **THEN** writes are serialized, queue drains successfully, and no deadlock or XX000 errors occur in logs. diff --git a/openspec/changes/fix-age-graph-deadlock-handling/tasks.md b/openspec/changes/fix-age-graph-deadlock-handling/tasks.md deleted file mode 100644 index 613d3296f..000000000 --- a/openspec/changes/fix-age-graph-deadlock-handling/tasks.md +++ /dev/null @@ -1,28 +0,0 @@ -## 1. Write Serialization (Root Cause Fix) -- [x] 1.1 Add `writeMu sync.Mutex` to `ageGraphWriter` struct to serialize database writes. -- [x] 1.2 Wrap `ExecuteQuery` call in `processRequest` with mutex lock/unlock. -- [x] 1.3 Document the serialization approach in code comments. - -## 2. Expand transient error classification -- [x] 2.1 Add SQLSTATE 40P01 (deadlock_detected) to `classifyAGEError()` as a transient error. -- [x] 2.2 Add SQLSTATE 40001 (serialization_failure) to `classifyAGEError()` as a transient error. -- [x] 2.3 Add string fallback patterns for "deadlock detected" and "could not serialize access" in wrapped errors. -- [x] 2.4 Define named constants for SQLSTATE codes for clarity. - -## 3. Improve backoff strategy for deadlocks -- [x] 3.1 Add `defaultAgeGraphDeadlockBackoff` constant (500ms) for deadlock-specific backoff. -- [x] 3.2 Update `backoffDelay()` to accept SQLSTATE code and use longer backoff for deadlocks. -- [x] 3.3 Implement exponential backoff with randomized jitter. -- [x] 3.4 Add `AGE_GRAPH_DEADLOCK_BACKOFF_MS` environment variable for tuning. - -## 4. Add deadlock-specific metrics -- [x] 4.1 Add `age_graph_deadlock_total` counter metric. -- [x] 4.2 Add `age_graph_serialization_failure_total` counter metric. -- [x] 4.3 Add `age_graph_transient_retry_total` counter metric. -- [x] 4.4 Record metrics in `processRequest` when corresponding errors occur. - -## 5. Testing and validation -- [ ] 5.1 Build and lint verification (completed). -- [ ] 5.2 Deploy to demo namespace and verify deadlocks/XX000 errors are eliminated. -- [ ] 5.3 Monitor metrics to confirm near-zero deadlock rate. -- [ ] 5.4 Verify queue drains successfully under load. diff --git a/openspec/changes/fix-checker-device-identity/proposal.md b/openspec/changes/fix-checker-device-identity/proposal.md deleted file mode 100644 index d46333c35..000000000 --- a/openspec/changes/fix-checker-device-identity/proposal.md +++ /dev/null @@ -1,83 +0,0 @@ -# Change: Fix Checker Device Identity Resolution - -## Why -Checkers (sysmon, SNMP collectors, etc.) running on agents/pollers are incorrectly creating device records for the collector's ephemeral host IP (e.g., Docker container IP `172.18.0.5`) instead of only creating devices for the actual monitored targets (e.g., sysmon-vm at `192.168.1.218`). This results in phantom devices appearing in the device inventory UI with hostnames like "agent" and ephemeral container IPs. - -**Important:** ServiceRadar internal infrastructure services (agents, pollers, datasvc/kv, sync, mapper, otel, zen) MUST continue to appear as devices in inventory - these are self-reported services that users need to monitor for health/availability. 
- -## What Changes - -### Leverage Existing DIRE Infrastructure -The Device Identity Reconciliation Engine (DIRE) already has the concept of **service device IDs** (`serviceradar:type:id`) which act as strong identifiers that: -- Are stable across IP changes (the ID is based on service name, not IP) -- Skip IP-based deduplication and resolution -- Allow services to update their IP when containers restart without creating duplicate devices - -**Key insight:** Internal services should use `serviceradar:type:id` format IDs, which the existing DIRE system already handles correctly for IP churn. - -### Changes Required - -1. **Add new ServiceTypes for core services** in `pkg/models/service_device.go`: - - `ServiceTypeDatasvc` / `ServiceTypeKV` - - `ServiceTypeSync` - - `ServiceTypeMapper` - - `ServiceTypeOtel` - - `ServiceTypeZen` - -2. **Ensure core services register using service device IDs**: - - When datasvc, sync, mapper, otel, zen report status, they should use `serviceradar:datasvc:instance-name` format - - This leverages existing DIRE skip logic: `isServiceDeviceID()` returns true, DIRE skips IP-based resolution - -3. **Fix `ensureServiceDevice` in `pkg/core/devices.go`**: - - Currently creates devices with `partition:IP` format for checker hosts - - Should distinguish between: - - **Self-reported internal service** → use `serviceradar:type:id` format (handled by existing code paths) - - **Checker polling external target** → only create device for the TARGET IP, not the checker's host IP - - Extract target address from checker config/service data and only create device for that target - -4. **Skip device creation for checker's own host IP**: - - When processing gRPC checker results, detect if `host_ip` matches the agent/poller's registered IP - - If so, skip device creation for that IP (it's the collector, not the target) - -## Impact -- Affected specs: `device-identity-reconciliation` -- Affected code: - - `pkg/models/service_device.go` (add new ServiceTypes) - - `pkg/core/devices.go` (`ensureServiceDevice`, extract target vs collector host) - - Core services registration (datasvc, sync, mapper, otel, zen) to use service device IDs - -## Implementation Status - -### Completed (Unit Tested) - -#### 1. ServiceTypes for Core Services (`pkg/models/service_device.go`) -- Added `ServiceTypeDatasvc`, `ServiceTypeKV`, `ServiceTypeSync`, `ServiceTypeMapper`, `ServiceTypeOtel`, `ServiceTypeZen`, `ServiceTypeCore` -- Added `CreateCoreServiceDeviceUpdate()` helper in `pkg/models/service_registration.go` - -#### 2. Core Service Registration (`pkg/core/services.go`) -- `getCoreServiceType()` - identifies core services from service type string -- `findCoreServiceType()` - scans services list for core services -- `registerCoreServiceDevice()` - registers core service with stable device ID -- `registerServiceOrCoreDevice()` - routes to correct registration path - -#### 3. Fix Checker Device Registration (`pkg/core/devices.go`) -- Modified `ensureServiceDevice()` to detect and skip collector IPs -- `getCollectorIP()` - looks up agent/poller IP from ServiceRegistry -- `isEphemeralCollectorIP()` - heuristic fallback for phantom detection -- `isDockerBridgeIP()` - identifies Docker bridge network IPs (172.17-21.x.x) -- `extractIPFromMetadata()` - extracts IP from service metadata - -#### 4. 
Database Migration (`pkg/db/cnpg/migrations/`) -- `00000000000011_cleanup_phantom_devices.up.sql` - removes phantom devices with backup -- `00000000000011_cleanup_phantom_devices.down.sql` - restores from backup - -#### 5. Test Coverage -- 21 unit tests covering all new functionality -- Tests in `pkg/core/devices_test.go`, `pkg/core/services_core_test.go`, `pkg/models/service_device_test.go` -- Edge cases: IP normalization, Docker IP boundaries, hostname case sensitivity, nil registries -- Safety tests: service device IDs excluded from phantom cleanup, legitimate Docker targets preserved - -### Pending (Integration/Manual Verification) -- Integration test: Agent restart with new IP updates existing device -- Integration test: Core services appear in device inventory -- Manual verification of device inventory in production environment diff --git a/openspec/changes/fix-checker-device-identity/specs/device-identity-reconciliation/spec.md b/openspec/changes/fix-checker-device-identity/specs/device-identity-reconciliation/spec.md deleted file mode 100644 index d865bb002..000000000 --- a/openspec/changes/fix-checker-device-identity/specs/device-identity-reconciliation/spec.md +++ /dev/null @@ -1,112 +0,0 @@ -## ADDED Requirements -### Requirement: Service Device ID as Strong Identifier -The system SHALL use service device IDs (`serviceradar:type:id` format) as strong identifiers for all ServiceRadar internal infrastructure services, allowing IP addresses to change without creating duplicate device records. - -#### Scenario: Agent IP changes after container restart -- **WHEN** agent `docker-agent` with device ID `serviceradar:agent:docker-agent` restarts and gets a new IP from `172.18.0.5` to `172.18.0.8` -- **THEN** the system updates the existing device record's IP field without creating a new device, because the service device ID is the strong identifier - -#### Scenario: Poller IP changes in Kubernetes -- **WHEN** poller `k8s-poller` with device ID `serviceradar:poller:k8s-poller` is rescheduled to a different node with a new pod IP -- **THEN** the existing device record is updated with the new IP, maintaining continuity of the device's history and metrics - -#### Scenario: DIRE skips IP-based resolution for service devices -- **WHEN** a device update arrives with a `serviceradar:` prefixed device ID -- **THEN** the Device Identity Reconciliation Engine (DIRE) skips IP-based deduplication and resolution, preserving the service device ID as-is - -### Requirement: Core Service Registration with Service Device IDs -All ServiceRadar core services (datasvc/kv, sync, mapper, otel, zen) SHALL register themselves using service device IDs so they appear in device inventory with stable identities that survive IP changes. 
- -#### Scenario: Datasvc registers as service device -- **WHEN** the datasvc (KV) service starts and reports its status -- **THEN** it registers with device ID `serviceradar:datasvc:instance-name` and its current host IP, appearing in device inventory as an internal service - -#### Scenario: Sync service registers as service device -- **WHEN** the sync service starts and reports its status -- **THEN** it registers with device ID `serviceradar:sync:instance-name` and its current host IP - -#### Scenario: Core service survives IP change -- **WHEN** any core service (datasvc, sync, mapper, otel, zen) restarts with a new ephemeral IP -- **THEN** the existing device record is updated rather than creating a duplicate, because the service device ID remains constant - -### Requirement: Checker Target vs Collector Host Distinction -The system SHALL distinguish between the collector host (where the checker runs) and the monitoring target (what the checker monitors), only creating device records for monitoring targets. - -#### Scenario: gRPC checker polls remote sysmon target -- **WHEN** a gRPC checker running on agent `docker-agent` at IP `172.18.0.5` polls sysmon-vm at target IP `192.168.1.218` -- **THEN** the system creates a device record only for the target `192.168.1.218` and does NOT create a device record for the collector IP `172.18.0.5` based on the checker result - -#### Scenario: SNMP collector polls remote target -- **WHEN** an SNMP collector running on poller at `172.18.0.6` polls metrics from target `192.168.1.1` -- **THEN** the system creates a device record only for `192.168.1.1` and does NOT create a device for the collector IP - -#### Scenario: Checker host IP matches agent IP -- **WHEN** a checker reports `host_ip: 172.18.0.5` and that IP matches the registered agent's current IP -- **THEN** the system recognizes this as the collector's own address and skips device creation for that IP from the checker result - -### Requirement: Checker Definitions and Results Are Not Devices -The system SHALL NOT create unified device records or sightings from checker definitions in poller configuration or from checker host metadata; only the monitoring targets themselves may become devices. - -#### Scenario: Poller checker definition is ignored -- **WHEN** `poller.json` defines a checker service (e.g., `checker_service: sysmon-vm`, `checker_service_type: grpc`, `checker_host_ip: 172.18.0.5`) -- **THEN** no unified device or device sighting is created from that checker definition; device creation is limited to actual monitoring targets discovered at runtime - -#### Scenario: Checker host result is ignored -- **WHEN** a checker result is received with `checker_host_ip: 172.18.0.5`, `checker_service: sysmon-vm`, and `source: checker` for collector `docker-agent` -- **THEN** the system skips device creation for that host IP and service ID, ensuring the existing target device (e.g., sysmon-vm) remains the only device in inventory - -### Requirement: Internal Service Type Registry -The system SHALL maintain a registry of ServiceTypes for internal services that use service device ID format. 
- -#### Scenario: ServiceType constants for core services -- **WHEN** the system is initialized -- **THEN** ServiceType constants exist for: `poller`, `agent`, `checker`, `datasvc`, `kv`, `sync`, `mapper`, `otel`, `zen` - -#### Scenario: isServiceDeviceID check -- **WHEN** determining if a device ID is for an internal service -- **THEN** the system checks if the ID starts with `serviceradar:` prefix to identify service device IDs - -### Requirement: Heuristic Fallback for Phantom Device Detection -When the collector IP cannot be determined from the ServiceRegistry, the system SHALL use heuristics to detect phantom devices based on Docker bridge network IPs and collector-like hostnames. - -#### Scenario: Docker bridge IP with agent hostname -- **WHEN** a checker reports `host_ip: 172.18.0.5` with hostname containing "agent" -- **AND** the ServiceRegistry lookup returns empty (collector IP unknown) -- **THEN** the system identifies this as an ephemeral collector IP and skips device creation - -#### Scenario: Docker bridge IP with proper target hostname -- **WHEN** a checker reports `host_ip: 172.18.0.10` with hostname `mysql-primary` -- **THEN** the system creates a device record because the hostname indicates a legitimate target, not a collector - -#### Scenario: Non-Docker IP with agent hostname -- **WHEN** a checker reports `host_ip: 192.168.1.100` with hostname `my-agent-server` -- **THEN** the system creates a device record because the IP is not in Docker bridge ranges - -#### Scenario: Docker IP boundary conditions -- **WHEN** the system evaluates IP addresses -- **THEN** it identifies IPs in ranges 172.17.0.0-172.21.255.255 as Docker bridge network IPs -- **AND** IPs like 172.16.x.x or 172.22.x.x are NOT considered Docker bridge IPs - -### Requirement: Database Cleanup Migration for Phantom Devices -The system SHALL provide a database migration to clean up existing phantom devices while preserving legitimate service device records. - -#### Scenario: Migration backs up phantom devices before deletion -- **WHEN** the cleanup migration runs -- **THEN** it creates a backup table `_phantom_devices_backup` containing all devices to be deleted -- **AND** then deletes the phantom devices from `unified_devices` - -#### Scenario: Migration preserves service device IDs -- **WHEN** the cleanup migration identifies phantom devices -- **THEN** it excludes all devices with `device_id LIKE 'serviceradar:%'` from deletion - -#### Scenario: Migration rollback restores deleted devices -- **WHEN** the rollback migration runs -- **THEN** it restores all devices from `_phantom_devices_backup` to `unified_devices` -- **AND** drops the backup table - -#### Scenario: Phantom device identification criteria -- **WHEN** the migration identifies phantom devices -- **THEN** it matches devices with: - - IP in Docker bridge ranges (172.17-21.x.x) - - Source is 'checker' or 'self-reported' - - Hostname is NULL, empty, 'unknown', 'localhost', or contains 'agent', 'poller', 'collector' diff --git a/openspec/changes/fix-checker-device-identity/tasks.md b/openspec/changes/fix-checker-device-identity/tasks.md deleted file mode 100644 index 5efaa8e88..000000000 --- a/openspec/changes/fix-checker-device-identity/tasks.md +++ /dev/null @@ -1,84 +0,0 @@ -## 1. 
Add ServiceTypes for Core Services - -- [x] 1.1 Add `ServiceTypeDatasvc ServiceType = "datasvc"` to `pkg/models/service_device.go` -- [x] 1.2 Add `ServiceTypeKV ServiceType = "kv"` (alias for datasvc legacy name) -- [x] 1.3 Add `ServiceTypeSync ServiceType = "sync"` -- [x] 1.4 Add `ServiceTypeMapper ServiceType = "mapper"` -- [x] 1.5 Add `ServiceTypeOtel ServiceType = "otel"` -- [x] 1.6 Add `ServiceTypeZen ServiceType = "zen"` -- [x] 1.7 Add `CreateCoreServiceDeviceUpdate()` helper similar to `CreatePollerDeviceUpdate()` -- [x] 1.8 Add `ServiceTypeCore ServiceType = "core"` for the core service - -## 2. Core Service Registration with Service Device IDs - -- [x] 2.1 Core now auto-detects datasvc service type and registers with `serviceradar:datasvc:instance-name` device ID -- [x] 2.2 Core now auto-detects sync service type and registers with `serviceradar:sync:instance-name` device ID -- [x] 2.3 Core now auto-detects mapper service type and registers with `serviceradar:mapper:instance-name` device ID -- [x] 2.4 Core now auto-detects otel service type and registers with `serviceradar:otel:instance-name` device ID -- [x] 2.5 Core now auto-detects zen service type and registers with `serviceradar:zen:instance-name` device ID -- [x] 2.6 Each core service includes its host IP in the device update via `registerCoreServiceDevice()` -- [x] 2.7 Added `getCoreServiceType()` to identify core services from service type string -- [x] 2.8 Added `findCoreServiceType()` to scan services list for core services -- [x] 2.9 Added `registerServiceOrCoreDevice()` helper to DRY device registration - -## 3. Fix Checker Device Registration - -- [x] 3.1 Modify `ensureServiceDevice` in `pkg/core/devices.go`: - - Check if `host_ip` matches collector's registered IP - - Skip device creation if it's the collector's own address - - Add heuristic to detect ephemeral Docker IPs with agent/poller hostnames -- [x] 3.2 Add `getCollectorIP()` helper to look up agent/poller's registered IP -- [x] 3.3 Add `isEphemeralCollectorIP()` heuristic to detect phantom devices -- [x] 3.4 Add `isDockerBridgeIP()` helper to identify Docker bridge network IPs -- [x] 3.5 Add `extractIPFromMetadata()` helper to extract IP from service metadata - -## 4. Agent/Poller IP Tracking for Collector Detection - -- [x] 4.1 Use existing ServiceRegistry to look up agent/poller IPs -- [x] 4.2 Add `getCollectorIP(ctx, agentID, pollerID)` helper that queries ServiceRegistry and DB -- [x] 4.3 In `ensureServiceDevice`, check if extracted `host_ip` equals collector's registered IP -- [x] 4.4 If match, log debug message and skip device creation (it's the collector, not target) - -## 5. Database Cleanup for Existing Phantom Devices - -- [x] 5.1 Write SQL query to identify phantom devices (see migration file) -- [x] 5.2 Create migration `00000000000011_cleanup_phantom_devices.up.sql`: - - Creates backup table `_phantom_devices_backup` before deletion - - Identifies phantom devices by Docker bridge IPs + checker source + collector hostname - - Preserves all `serviceradar:*` service devices - - Deletes identified phantom devices -- [x] 5.3 Create rollback migration `00000000000011_cleanup_phantom_devices.down.sql` - -## 6. 
Testing - -- [x] 6.1 Unit test: `CreateCoreServiceDeviceUpdate()` generates correct service device ID -- [x] 6.2 Unit test: Service device ID survives IP change (same device_id, updated IP) -- [x] 6.3 Unit test: `isDockerBridgeIP()` correctly identifies Docker bridge IPs -- [x] 6.4 Unit test: `isEphemeralCollectorIP()` detects phantom collector devices -- [x] 6.5 Unit test: `extractIPFromMetadata()` extracts IP from various metadata keys -- [x] 6.6 Unit test: `ensureServiceDevice` skips device creation for ephemeral collector IPs -- [x] 6.7 Unit test: `ensureServiceDevice` creates devices for legitimate targets -- [x] 6.8 Unit test: `getCoreServiceType()` identifies all core service types -- [x] 6.9 Unit test: `findCoreServiceType()` scans service lists correctly -- [x] 6.10 Unit test: `registerCoreServiceDevice()` creates stable device IDs -- [x] 6.11 Unit test: `registerServiceOrCoreDevice()` routes to correct registration -- [x] 6.12 Unit test: Core service device ID format verification -- [x] 6.13 Unit test: `normalizeHostIdentifier()` IP normalization edge cases -- [x] 6.14 Unit test: `isDockerBridgeIP()` boundary conditions (172.16 vs 172.17-21 vs 172.22) -- [x] 6.15 Unit test: `isEphemeralCollectorIP()` case sensitivity and partial hostname matches -- [x] 6.16 Unit test: `getCollectorIP()` nil registry handling -- [x] 6.17 Unit test: `extractCheckerHostIdentity()` JSON parsing variations -- [x] 6.18 Unit test: `ensureServiceDevice` non-gRPC service type filtering -- [x] 6.19 Unit test: `ensureServiceDevice` unknown/empty host IP handling -- [x] 6.20 Unit test: Service device IDs don't match phantom cleanup migration criteria -- [x] 6.21 Unit test: Legitimate Docker targets (mysql, redis) not falsely detected as phantom -- [ ] 6.22 Integration test: Agent restart with new IP updates existing device, no duplicate -- [ ] 6.23 Integration test: Core services (datasvc, sync, etc.) appear in device inventory - -## 7. Verification - -- [ ] 7.1 Verify agents appear in device inventory with `serviceradar:agent:*` IDs -- [ ] 7.2 Verify pollers appear in device inventory with `serviceradar:poller:*` IDs -- [ ] 7.3 Verify core services appear in device inventory with their service device IDs -- [ ] 7.4 Verify checker targets (e.g., sysmon-vm at 192.168.1.218) appear correctly -- [ ] 7.5 Verify NO phantom devices with Docker bridge IPs and hostname "agent" diff --git a/openspec/changes/fix-clone-device-record-metadata-aliasing/proposal.md b/openspec/changes/fix-clone-device-record-metadata-aliasing/proposal.md new file mode 100644 index 000000000..abe0108ef --- /dev/null +++ b/openspec/changes/fix-clone-device-record-metadata-aliasing/proposal.md @@ -0,0 +1,21 @@ +# Change: Fix `cloneDeviceRecord` metadata aliasing for empty maps + +## Why +Issue #2145 reports that `pkg/registry/device_store.go:cloneDeviceRecord` fails to deep-copy `DeviceRecord.Metadata` when the source map is empty-but-non-nil. Because the record is shallow-copied (`dst := *src`), the clone and original end up sharing the same underlying map reference, defeating the defensive-copying contract used throughout the device registry. + +This can cause surprising cross-call contamination (a later clone “inherits” keys written to a previous clone), incorrect device state in the registry’s in-memory cache, and potential data races when callers concurrently read/write “independent” record copies. + +## What Changes +- Update `cloneDeviceRecord` to deep-copy `Metadata` when `src.Metadata != nil` (including empty maps). 
+- Update `cloneDeviceRecord` to deep-copy `DiscoverySources` and `Capabilities` when non-nil (including empty-but-non-nil slices) to avoid similar aliasing via shared backing arrays. +- Add regression tests covering: + - Empty-but-non-nil `Metadata` map isolation (original vs clone and clone vs clone). + - Empty-but-non-nil slices with non-zero capacity (append to clone does not affect original or subsequent clones). + +## Impact +- Affected specs: `device-registry-defensive-copying` +- Affected code: + - `pkg/registry/device_store.go` (`cloneDeviceRecord`) + - `pkg/registry/*_test.go` (new/updated unit tests) +- Risk: Low. Changes are limited to defensive copying; primary impact is small additional allocations during record cloning. + diff --git a/openspec/changes/fix-clone-device-record-metadata-aliasing/specs/device-registry-defensive-copying/spec.md b/openspec/changes/fix-clone-device-record-metadata-aliasing/specs/device-registry-defensive-copying/spec.md new file mode 100644 index 000000000..89b9e1604 --- /dev/null +++ b/openspec/changes/fix-clone-device-record-metadata-aliasing/specs/device-registry-defensive-copying/spec.md @@ -0,0 +1,22 @@ +## ADDED Requirements + +### Requirement: DeviceRecord clone isolation +The device registry MUST return `DeviceRecord` values whose mutable fields do not alias internal stored state or other returned clones, including when those fields are empty-but-non-nil. + +#### Scenario: Empty metadata maps are deep-copied +- **GIVEN** a stored `DeviceRecord` has `Metadata` set to an empty (but non-nil) map +- **WHEN** a caller retrieves a copy via a registry getter that uses `cloneDeviceRecord` +- **THEN** the returned `Metadata` MUST be a distinct map instance from the stored record +- **AND** mutating the returned `Metadata` MUST NOT change the stored record’s `Metadata` + +#### Scenario: Empty slices are non-aliased +- **GIVEN** a stored `DeviceRecord` has `DiscoverySources` and/or `Capabilities` set to empty-but-non-nil slices (including cases with non-zero capacity) +- **WHEN** a caller retrieves a copy via a registry getter that uses `cloneDeviceRecord` +- **THEN** appending to the returned slices MUST NOT affect the stored record’s slices +- **AND** subsequent clones MUST NOT observe values appended to earlier clones + +#### Scenario: Clone mutation does not affect other clones +- **GIVEN** two callers retrieve independent clones of the same stored `DeviceRecord` +- **WHEN** one caller mutates its clone’s `Metadata` (e.g., sets a new key) +- **THEN** the other caller’s clone MUST NOT reflect that mutation + diff --git a/openspec/changes/fix-clone-device-record-metadata-aliasing/tasks.md b/openspec/changes/fix-clone-device-record-metadata-aliasing/tasks.md new file mode 100644 index 000000000..83dbcc422 --- /dev/null +++ b/openspec/changes/fix-clone-device-record-metadata-aliasing/tasks.md @@ -0,0 +1,13 @@ +## 1. Fix cloning behavior +- [x] 1.1 Update `cloneDeviceRecord` to deep-copy `Metadata` for any non-nil map (even when empty) +- [x] 1.2 Update `cloneDeviceRecord` to deep-copy `DiscoverySources` and `Capabilities` for any non-nil slice (even when empty) +- [x] 1.3 Ensure pointer fields (`Hostname`, `MAC`, `IntegrationID`, `CollectorAgentID`) remain non-aliased as today + +## 2. 
Add regression tests +- [x] 2.1 Add unit test: empty-but-non-nil `Metadata` is deep-copied (mutating clone does not affect original) +- [x] 2.2 Add unit test: two clones from the same record do not share `Metadata` (mutating one does not affect the other) +- [x] 2.3 Add unit test: empty-but-non-nil slices (including `make([]string, 0, N)`) do not alias (appending to clone does not affect original or later clones) + +## 3. Verification +- [x] 3.1 Run `go test ./pkg/registry/...` +- [x] 3.2 Run `go test -race ./pkg/registry/...` (or a targeted race repro test if needed) diff --git a/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/proposal.md b/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/proposal.md deleted file mode 100644 index 19374862a..000000000 --- a/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/proposal.md +++ /dev/null @@ -1,44 +0,0 @@ -## Why -- The CNPG cluster is experiencing continuous aggregate refresh failures with error: `cache lookup failed for function "time_bucket" with 2 args` (job 1032). -- TimescaleDB continuous aggregates (CAGGs) store internal references to functions like `time_bucket` by their PostgreSQL Object ID (OID). When these OIDs become stale, the background refresh jobs fail silently every 5 minutes. -- This issue typically occurs after: - 1. CNPG pod restarts or failovers where extensions may be reloaded - 2. TimescaleDB extension upgrades that change function OIDs - 3. Template database inconsistencies during cluster initialization -- The device metrics summary CAGGs (`device_metrics_summary_cpu`, `device_metrics_summary_disk`, `device_metrics_summary_memory`) created in migration `00000000000003` are affected, causing aggregated metrics to become stale. -- Without working CAGGs, queries against `device_metrics_summary` return old data, impacting dashboard accuracy for CPU, disk, and memory utilization trends. - -## What Changes -- Add a new migration (`00000000000018_recreate_device_metrics_caggs.up.sql`) that: - 1. Drops the existing continuous aggregate policies to stop failing jobs - 2. Drops the composite view `device_metrics_summary` that depends on the CAGGs - 3. Drops and recreates all three materialized views (`device_metrics_summary_cpu`, `device_metrics_summary_disk`, `device_metrics_summary_memory`) with fresh function OID bindings - 4. Re-adds the continuous aggregate and retention policies - 5. Recreates the composite view -- The down migration will be a no-op since the CAGGs will be functionally identical. -- Add cleanup migration (`00000000000019_remove_unused_caggs.up.sql`) to drop orphaned/unused summary CAGGs and associated policies that were still registering OIDs and triggering cache errors. -- Add a runbook documenting the manual recovery procedure for operators who encounter this issue between releases. -- Consider adding a CNPG health check that monitors `timescaledb_information.job_errors` for refresh policy failures and surfaces them in observability. - -## Impact -- Existing aggregated data in the CAGGs will be lost and need to re-materialize from the underlying hypertables. Since retention is 3 days on both the source tables and CAGGs, this means temporary gaps in historical summaries until the next refresh cycles complete. -- The migration runs as part of `serviceradar-core` startup; clusters with large amounts of metrics data may see slightly longer startup times due to CAGG recreation. 
-- No API or configuration changes required; the fix is transparent to consumers of the `device_metrics_summary` view. -- Future CNPG image updates should be tested for CAGG compatibility before deployment to avoid regression. - -## Root Cause Analysis -The error `cache lookup failed for function "time_bucket" with 2 args` indicates that PostgreSQL's function cache contains an OID reference that no longer exists in `pg_proc`. Research from TimescaleDB issue trackers reveals: - -1. **Function OID Volatility**: The `time_bucket(interval, timestamptz)` function's OID is assigned when TimescaleDB is created. If the extension is dropped/recreated or the cluster is reinitialized, new OIDs are assigned. - -2. **CAGG Internal Storage**: Continuous aggregates store a "finalized" query plan that includes hardcoded function OIDs. These are not automatically updated when the underlying functions change. - -3. **CNPG Lifecycle Events**: The CloudNativePG operator may trigger scenarios where extensions are reloaded (e.g., during major version upgrades, recovery from backup, or replica promotion). - -4. **Known TimescaleDB Limitation**: There is no supported mechanism to "repair" a CAGG with stale OID references; recreation is the only fix. - -## References -- GitHub Issue: https://github.com/carverauto/serviceradar/issues/2065 -- TimescaleDB Issue #1494: Cache lookup errors during extension operations -- TimescaleDB Issue #1492: Restart workaround for function cache issues -- Affected migrations: `pkg/db/cnpg/migrations/00000000000003_device_metrics_summary_cagg.up.sql` diff --git a/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/specs/cnpg/spec.md b/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/specs/cnpg/spec.md deleted file mode 100644 index fa826c11f..000000000 --- a/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/specs/cnpg/spec.md +++ /dev/null @@ -1,32 +0,0 @@ -## ADDED Requirements - -### Requirement: Continuous aggregate refresh policies remain functional across CNPG lifecycle events -TimescaleDB continuous aggregates MUST continue to refresh successfully after CNPG pod restarts, failovers, and extension reloads to ensure aggregated metrics remain current. - -#### Scenario: CAGG policies execute without cache lookup errors -- **GIVEN** device metrics CAGGs exist with refresh policies configured -- **WHEN** the CNPG cluster undergoes a pod restart or failover -- **THEN** the refresh policies continue to execute without "cache lookup failed" errors and `timescaledb_information.job_errors` contains no entries for the CAGG jobs. - -#### Scenario: Migration recreates CAGGs with fresh function bindings -- **GIVEN** a CNPG cluster with stale CAGG function OID references -- **WHEN** migration `00000000000018_recreate_device_metrics_caggs.up.sql` runs -- **THEN** all three device metrics CAGGs are dropped and recreated with valid `time_bucket` function bindings. - -#### Scenario: Composite view remains queryable after CAGG recreation -- **GIVEN** the `device_metrics_summary` composite view depends on the three underlying CAGGs -- **WHEN** the CAGGs are recreated by the migration -- **THEN** the composite view is also recreated and returns joined results from the underlying CAGGs. - -### Requirement: CAGG health is observable through standard monitoring -Operators MUST be able to detect CAGG refresh failures through CNPG logs or TimescaleDB system views without requiring direct database access. 
- -#### Scenario: Job errors appear in TimescaleDB information schema -- **GIVEN** a continuous aggregate refresh policy encounters an error -- **WHEN** an operator queries `SELECT * FROM timescaledb_information.job_errors ORDER BY finish_time DESC LIMIT 10` -- **THEN** the error details including the job ID and error message are visible. - -#### Scenario: CNPG logs capture refresh policy failures -- **GIVEN** a continuous aggregate refresh fails with an error -- **WHEN** the operator reviews CNPG pod logs -- **THEN** the error is logged with severity ERROR and includes the job ID and error message for correlation. diff --git a/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/tasks.md b/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/tasks.md deleted file mode 100644 index 10beb0e86..000000000 --- a/openspec/changes/fix-cnpg-continuous-aggregate-cache-error/tasks.md +++ /dev/null @@ -1,45 +0,0 @@ -# Tasks - -## Migration: Recreate Device Metrics CAGGs -- [x] Create migration file `pkg/db/cnpg/migrations/00000000000018_recreate_device_metrics_caggs.up.sql` - - [x] Add `SELECT remove_continuous_aggregate_policy()` for all three CAGGs - - [x] Add `SELECT remove_retention_policy()` for all three CAGGs - - [x] Drop `device_metrics_summary` composite view - - [x] Drop `device_metrics_summary_memory` materialized view - - [x] Drop `device_metrics_summary_disk` materialized view - - [x] Drop `device_metrics_summary_cpu` materialized view - - [x] Recreate CPU CAGG with `time_bucket(INTERVAL '5 minutes', timestamp)` - - [x] Set `timescaledb.materialized_only = FALSE` for CPU CAGG - - [x] Add continuous aggregate policy for CPU (3 days offset, 10 min end offset, 5 min interval) - - [x] Add retention policy for CPU (3 days) - - [x] Recreate Disk CAGG - - [x] Set `timescaledb.materialized_only = FALSE` for Disk CAGG - - [x] Add continuous aggregate policy for Disk - - [x] Add retention policy for Disk -- [x] Recreate Memory CAGG -- [x] Set `timescaledb.materialized_only = FALSE` for Memory CAGG -- [x] Add continuous aggregate policy for Memory -- [x] Add retention policy for Memory -- [x] Recreate `device_metrics_summary` composite view with JOINs -- [x] Create down migration `pkg/db/cnpg/migrations/00000000000018_recreate_device_metrics_caggs.down.sql` (no-op comment) -- [x] Add cleanup migration `pkg/db/cnpg/migrations/00000000000019_remove_unused_caggs.up.sql` to drop unused summary CAGGs/policies that still register OIDs. 
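As a companion to the optional health-check idea mentioned in this change, a minimal Go sketch of polling `timescaledb_information.job_errors` from core might look like the following. The view name and the `finish_time` ordering come from this change; the remaining column names, the `pgxpool` wiring, and the `RecentCAGGJobErrors` helper are illustrative assumptions, not an existing ServiceRadar API.

```go
package cnpghealth

import (
	"context"
	"fmt"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"
)

// CAGGJobError is a minimal projection of a timescaledb_information.job_errors row.
// Column names other than finish_time are assumed for this sketch.
type CAGGJobError struct {
	JobID      int64
	FinishTime time.Time
	Message    string
}

// RecentCAGGJobErrors returns refresh-job errors recorded within the given window.
// It illustrates the optional CAGG health check discussed in this change.
func RecentCAGGJobErrors(ctx context.Context, pool *pgxpool.Pool, window time.Duration) ([]CAGGJobError, error) {
	cutoff := time.Now().Add(-window)

	rows, err := pool.Query(ctx, `
		SELECT job_id, finish_time, err_message
		FROM timescaledb_information.job_errors
		WHERE finish_time > $1
		ORDER BY finish_time DESC`, cutoff)
	if err != nil {
		return nil, fmt.Errorf("query job_errors: %w", err)
	}
	defer rows.Close()

	var out []CAGGJobError
	for rows.Next() {
		var e CAGGJobError
		if err := rows.Scan(&e.JobID, &e.FinishTime, &e.Message); err != nil {
			return nil, fmt.Errorf("scan job_errors row: %w", err)
		}
		out = append(out, e)
	}

	return out, rows.Err()
}
```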
- -## Documentation -- [ ] Add runbook `docs/docs/runbooks/cnpg-cagg-refresh-error.md` with: - - [ ] Symptoms: How to identify the error in CNPG logs - - [ ] Diagnosis: Query `timescaledb_information.job_errors` to confirm - - [ ] Manual Fix: SQL commands to recreate CAGGs without waiting for migration - - [ ] Prevention: Notes on CNPG upgrade best practices - -## Verification -- [ ] Deploy migration to demo cluster -- [ ] Verify CAGGs are created: `\d+ device_metrics_summary_cpu` -- [ ] Verify policies are active: `SELECT * FROM timescaledb_information.jobs WHERE proc_name = 'policy_refresh_continuous_aggregate'` -- [ ] Wait for refresh cycle (5 min) and confirm no errors in `timescaledb_information.job_errors` -- [ ] Query `device_metrics_summary` and verify data is populating -- [ ] Close GitHub issue #2065 - -## Future Improvements (Optional) -- [ ] Add CNPG health check for CAGG job failures -- [ ] Consider alerting integration for `timescaledb_information.job_errors` -- [ ] Evaluate if CAGG definitions should use explicit function schema qualification diff --git a/openspec/changes/fix-cnpg-pool-sslmode-default/design.md b/openspec/changes/fix-cnpg-pool-sslmode-default/design.md new file mode 100644 index 000000000..f29eecb6e --- /dev/null +++ b/openspec/changes/fix-cnpg-pool-sslmode-default/design.md @@ -0,0 +1,31 @@ +## Context +ServiceRadar components connect to CNPG/Timescale using `models.CNPGDatabase` and `pkg/db/cnpg_pool.go:NewCNPGPool`. Today, when `ssl_mode` is omitted, the connection URL is built with `sslmode=disable` unconditionally, which can disable TLS even when a TLS config is present. + +## Goals / Non-Goals +- Goals: + - Prevent silent TLS downgrade when operators provide TLS materials. + - Default to a secure TLS mode that matches documented expectations. + - Provide clear, early failures for contradictory configurations. +- Non-Goals: + - Redesign CNPG configuration schema. + - Implement new certificate provisioning flows. + +## Decisions +- Decision: Default `ssl_mode` to `verify-full` when `tls` is configured and `ssl_mode` is unset. + - Rationale: `verify-full` provides the strongest default (chain + hostname) and aligns with existing deployment guidance that expects `verify-full`. +- Decision: Treat `tls` + `ssl_mode=disable` as invalid and return an error. + - Rationale: This combination is always a footgun; allowing it results in silent plaintext connections. + +## Risks / Trade-offs +- Risk: `verify-full` can fail if operators connect to CNPG by IP or an alias not present in the server cert SAN. + - Mitigation: document explicit overrides (`verify-ca` or `require`) and/or ensure CNPG server cert SAN covers the configured host. + +## Migration Plan +1. Ship the defaulting + validation behavior behind the existing config model (no schema changes). +2. Update tests to lock in behavior. +3. Update operator docs to explain override behavior. +4. Roll out to demo/compose; validate CNPG connectivity in both TLS and non-TLS modes. + +## Open Questions +- Should we log a structured warning when `ssl_mode` is empty and we default it (for auditability), or keep it silent? 
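A minimal sketch of the defaulting and validation rules described in this design, assuming a simplified view of the config (a boolean standing in for "TLS material present" rather than the full `models.CNPGDatabase` struct); the actual `NewCNPGPool` logic may be shaped differently:

```go
package db

import "fmt"

// resolveSSLMode sketches the ssl_mode defaulting/validation this design
// chooses. hasTLS stands in for cfg.TLS != nil; sslMode for cfg.SSLMode.
func resolveSSLMode(hasTLS bool, sslMode string) (string, error) {
	switch {
	case hasTLS && sslMode == "":
		// TLS material provided but no explicit mode: default to the strongest check.
		return "verify-full", nil
	case hasTLS && sslMode == "disable":
		// Contradictory configuration: fail fast instead of silently downgrading to plaintext.
		return "", fmt.Errorf("cnpg: tls is configured but ssl_mode=disable; use require, verify-ca, or verify-full")
	case sslMode == "":
		// No TLS config at all: keep the existing local/dev plaintext default.
		return "disable", nil
	default:
		// Honor explicit overrides (require, verify-ca, verify-full, ...).
		return sslMode, nil
	}
}
```

The resolved value would then feed the `sslmode=` portion of the connection URL before the pool is created, so an omitted `ssl_mode` can no longer silently produce a plaintext connection when TLS material is present.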
+ diff --git a/openspec/changes/fix-cnpg-pool-sslmode-default/proposal.md b/openspec/changes/fix-cnpg-pool-sslmode-default/proposal.md new file mode 100644 index 000000000..cec6cfc23 --- /dev/null +++ b/openspec/changes/fix-cnpg-pool-sslmode-default/proposal.md @@ -0,0 +1,29 @@ +# Change: Default CNPG sslmode based on TLS configuration + +## Why +`pkg/db/cnpg_pool.go:NewCNPGPool` currently defaults `sslmode=disable` whenever `ssl_mode` is unset, even when a full client TLS configuration is provided. This silently downgrades CNPG connections to plaintext and can violate production security expectations. + +## What Changes +- Default `ssl_mode` to `verify-full` when `tls` config is provided and `ssl_mode` is unset. +- Reject contradictory configuration where `tls` is provided but `ssl_mode=disable` is explicitly set. +- Add unit tests covering defaulting/validation behavior and preventing future regressions. +- Document the resulting behavior and how to override `ssl_mode` for non-DNS/SAN scenarios (e.g., IP-based connections). + +## Impact +- Affected specs: `cnpg` +- Affected code: `pkg/db/cnpg_pool.go`, `pkg/models/db.go` (docs/comments only), and tests under `pkg/db/` +- Compatibility: + - **Behavioral change**: configurations that provide `tls` but omit `ssl_mode` will start enforcing TLS (previously: silently plaintext). + - Potential new failures if hostnames do not match the CNPG server certificate (mitigation: set `ssl_mode=verify-ca` or `require`, or ensure the server cert SAN matches the configured host). + +## Acceptance Criteria +- When `tls` is provided and `ssl_mode` is unset, CNPG connections negotiate TLS and default to `ssl_mode=verify-full`. +- When `tls` is provided and `ssl_mode=disable`, pool creation fails fast with a clear configuration error. +- Unit tests cover both behaviors and prevent regressions. + +## Rollout Plan +- Land the code + tests, then validate in Docker Compose (CNPG TLS enabled) and in demo Helm/Kubernetes environments. +- If an environment requires IP-based DB addressing, set `ssl_mode=verify-ca` or `require`, or regenerate CNPG server certs with the correct SANs. + +## References +- GitHub issue: https://github.com/carverauto/serviceradar/issues/2143 diff --git a/openspec/changes/fix-cnpg-pool-sslmode-default/specs/cnpg/spec.md b/openspec/changes/fix-cnpg-pool-sslmode-default/specs/cnpg/spec.md new file mode 100644 index 000000000..0ec7e1f5e --- /dev/null +++ b/openspec/changes/fix-cnpg-pool-sslmode-default/specs/cnpg/spec.md @@ -0,0 +1,30 @@ +## ADDED Requirements +### Requirement: CNPG client connections default to TLS when configured +ServiceRadar components that connect to CNPG using `models.CNPGDatabase` MUST negotiate TLS when `tls` configuration is provided, even if `ssl_mode` is not explicitly set. + +#### Scenario: TLS config without ssl_mode uses verify-full +- **GIVEN** a `models.CNPGDatabase` configuration with `tls` set and `ssl_mode` unset +- **WHEN** a component builds its CNPG connection pool using `pkg/db/cnpg_pool.go:NewCNPGPool` +- **THEN** the connection attempts TLS using the provided client certificate and CA, with `ssl_mode` defaulting to `verify-full`. + +#### Scenario: No TLS config preserves plaintext defaults +- **GIVEN** a `models.CNPGDatabase` configuration with `tls` unset and `ssl_mode` unset +- **WHEN** a component builds its CNPG connection pool +- **THEN** the connection defaults to `ssl_mode=disable` for local/dev compatibility unless explicitly configured otherwise. 
+ +### Requirement: CNPG client configuration rejects insecure contradictions +ServiceRadar MUST reject CNPG configurations where `tls` is provided but `ssl_mode` is explicitly set to `disable`. + +#### Scenario: tls + ssl_mode=disable fails fast +- **GIVEN** a `models.CNPGDatabase` configuration with `tls` set and `ssl_mode=disable` +- **WHEN** a component attempts to build its CNPG connection pool +- **THEN** pool initialization fails with an error describing the invalid configuration. + +### Requirement: CNPG clients honor explicit ssl_mode overrides +When `ssl_mode` is explicitly set, ServiceRadar MUST honor the configured value (e.g., `require`, `verify-ca`, `verify-full`) subject to validation rules. + +#### Scenario: Explicit verify-ca is preserved +- **GIVEN** a `models.CNPGDatabase` configuration with `tls` set and `ssl_mode=verify-ca` +- **WHEN** a component builds its CNPG connection pool +- **THEN** the connection attempts TLS and uses `ssl_mode=verify-ca`. + diff --git a/openspec/changes/fix-cnpg-pool-sslmode-default/tasks.md b/openspec/changes/fix-cnpg-pool-sslmode-default/tasks.md new file mode 100644 index 000000000..fb845d8ab --- /dev/null +++ b/openspec/changes/fix-cnpg-pool-sslmode-default/tasks.md @@ -0,0 +1,13 @@ +## 1. Implementation +- [x] 1.1 Update `pkg/db/cnpg_pool.go:NewCNPGPool` to default `sslmode=verify-full` when `cfg.TLS != nil` and `cfg.SSLMode == ""` +- [x] 1.2 Add validation so `cfg.TLS != nil` + `cfg.SSLMode == "disable"` returns an error before dialing +- [x] 1.3 Ensure explicit `cfg.SSLMode` values (`require`, `verify-ca`, `verify-full`, etc.) are preserved (subject to validation) +- [x] 1.4 Add unit tests for defaulting and validation (no network required; validate connection string / config behavior) + +## 2. Documentation +- [ ] 2.1 Update `openspec/specs/cnpg/spec.md` (via archive) to reflect secure defaults and validation rules +- [ ] 2.2 Add/adjust a short operator note in docs (if needed) describing `ssl_mode` override guidance for IP-based connections + +## 3. Validation +- [x] 3.1 Run `openspec validate fix-cnpg-pool-sslmode-default --strict` +- [x] 3.2 Run relevant Go unit tests (targeted `go test ./pkg/db/...`) diff --git a/openspec/changes/fix-core-oom-age-graph-backpressure/proposal.md b/openspec/changes/fix-core-oom-age-graph-backpressure/proposal.md deleted file mode 100644 index ada6c8399..000000000 --- a/openspec/changes/fix-core-oom-age-graph-backpressure/proposal.md +++ /dev/null @@ -1,26 +0,0 @@ -# Change: Fix core OOM crashes from AGE graph queue memory pressure - -## Why -Core in the demo namespace is being OOMKilled (4 restarts in 8 hours) despite having 4Gi memory allocated. Investigation reveals a memory leak pattern in the AGE graph writer: - -1. **Queue backlog builds up**: Queue depth reaches 38-39 items with 120+ second wait times -2. **Single worker bottleneck**: Default `AGE_GRAPH_WORKERS=1` processes batches in 5-8 seconds each, cannot drain incoming work -3. **Large sync payloads**: ~24MB messages with 16,384 device updates processed every ~5 minutes -4. **Memory accumulates in goroutines**: Each `enqueue()` call creates a goroutine that waits on a result channel for up to 2 minutes, holding payload data in memory -5. **Retry amplification**: Failed batches retry 3x with backoff, extending memory retention -6. **No memory-aware backpressure**: Queue accepts work until full (512 items) regardless of memory pressure - -The result is memory grows from 3.3Gi toward 4Gi until the kernel OOMKills the process. 
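To make the intended fix concrete, here is a minimal sketch of a memory-aware, non-blocking enqueue for the graph writer. The queue and payload types, the helper name, and the mapping from `AGE_GRAPH_MEMORY_LIMIT_MB` to a byte threshold are illustrative assumptions; the real writer in `pkg/registry/age_graph_writer.go` may be structured differently.

```go
package registry

import (
	"errors"
	"runtime"
)

var (
	errQueueFull      = errors.New("age graph queue full")
	errMemoryPressure = errors.New("age graph writer under memory pressure")
)

// graphBatch stands in for the real AGE merge payload type.
type graphBatch struct {
	updates []any
}

// boundedEnqueue rejects work instead of parking a goroutine on a result
// channel: if the Go heap is above the configured limit, or the queue is
// full, the batch is dropped and the caller records the reason as a metric.
func boundedEnqueue(queue chan graphBatch, b graphBatch, memoryLimitBytes uint64) error {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	if memoryLimitBytes > 0 && ms.HeapAlloc > memoryLimitBytes {
		return errMemoryPressure // surfaced as a "rejected: memory_pressure" metric
	}

	select {
	case queue <- b:
		return nil
	default:
		return errQueueFull // drop rather than block; surfaced as a drop metric
	}
}
```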
- -## What Changes -- Increase default AGE graph workers from 1 to 4 to improve queue drain rate -- Add memory-aware backpressure that rejects new batches when memory usage exceeds a threshold (e.g., 80% of limit) -- Implement fire-and-forget mode for non-critical graph updates to avoid goroutine accumulation -- Add circuit breaker pattern to temporarily disable graph writes when AGE is overloaded -- Expose memory usage and queue pressure metrics for alerting -- Consider payload size limits or chunking for very large sync batches (>16K devices) - -## Impact -- Affected specs: device-relationship-graph -- Affected code: pkg/registry/age_graph_writer.go, pkg/registry/age_graph_metrics.go, pkg/core/registry_handler.go -- K8s config: Consider increasing memory limits or adding memory monitoring alerts diff --git a/openspec/changes/fix-core-oom-age-graph-backpressure/specs/device-relationship-graph/spec.md b/openspec/changes/fix-core-oom-age-graph-backpressure/specs/device-relationship-graph/spec.md deleted file mode 100644 index 4c21ed81f..000000000 --- a/openspec/changes/fix-core-oom-age-graph-backpressure/specs/device-relationship-graph/spec.md +++ /dev/null @@ -1,46 +0,0 @@ -## ADDED Requirements - -### Requirement: AGE graph writer prevents OOM through memory-aware backpressure -The system SHALL implement memory-bounded queueing in the AGE graph writer to prevent core OOM crashes when graph write throughput cannot keep pace with incoming updates. - -#### Scenario: High ingestion rate with slow AGE response -- **WHEN** device updates arrive faster than AGE can process them -- **AND** the queue depth approaches capacity -- **THEN** the writer drops or rejects new batches instead of accumulating goroutines and memory -- **AND** metrics indicate rejected batches due to backpressure - -#### Scenario: Memory threshold triggers early rejection -- **WHEN** Go heap memory exceeds the configured threshold (default 80% of container limit) -- **THEN** the writer rejects new graph batches until memory pressure subsides -- **AND** rejected batches are logged with reason "memory_pressure" - -### Requirement: AGE graph writer scales with multiple workers -The system SHALL process AGE graph merges with configurable worker count (default 4) to improve queue drain rate under load. - -#### Scenario: Parallel workers drain queue faster -- **GIVEN** AGE_GRAPH_WORKERS=4 (default) -- **WHEN** multiple batches are queued for processing -- **THEN** up to 4 batches are processed concurrently -- **AND** queue depth remains stable during steady-state ingestion - -### Requirement: Large payloads are chunked before queueing -The system SHALL split device update batches larger than a configurable threshold into smaller chunks to limit per-request memory footprint. - -#### Scenario: Large sync message is chunked -- **WHEN** a sync service reports 16,384 device updates in a single message -- **AND** the chunk threshold is set to 5,000 devices -- **THEN** the updates are split into 4 chunks before entering the queue -- **AND** each chunk is processed independently - -### Requirement: Circuit breaker prevents cascading failures -The system SHALL implement a circuit breaker that temporarily disables AGE graph writes after repeated failures to prevent resource exhaustion. 
- -#### Scenario: Circuit opens after failures -- **WHEN** 10 consecutive AGE graph batches fail -- **THEN** the circuit breaker opens and rejects subsequent batches immediately -- **AND** the circuit enters half-open state after 60 seconds to test recovery - -#### Scenario: Circuit closes after recovery -- **WHEN** the circuit is half-open -- **AND** a test batch succeeds -- **THEN** the circuit closes and normal processing resumes diff --git a/openspec/changes/fix-core-oom-age-graph-backpressure/tasks.md b/openspec/changes/fix-core-oom-age-graph-backpressure/tasks.md deleted file mode 100644 index 98d6fa85f..000000000 --- a/openspec/changes/fix-core-oom-age-graph-backpressure/tasks.md +++ /dev/null @@ -1,24 +0,0 @@ -## 1. Immediate mitigations -- [x] 1.1 Increase `AGE_GRAPH_WORKERS` default from 1 to 4 to improve queue drain rate -- [x] 1.2 Reduce `AGE_GRAPH_QUEUE_SIZE` default from 512 to 256 to limit memory footprint -- [x] 1.3 Add memory usage metrics (Go runtime memstats) to core prometheus endpoint - -## 2. Memory-bounded queueing -- [x] 2.1 Add `AGE_GRAPH_MEMORY_LIMIT_MB` config that triggers early rejection when Go heap exceeds threshold -- [x] 2.2 Implement non-blocking enqueue mode that drops batches instead of waiting when queue is full -- [x] 2.3 Add metrics for dropped/rejected batches to distinguish memory pressure from AGE failures - -## 3. Payload optimization -- [x] 3.1 Existing chunking (128 items per batch via `AGE_GRAPH_CHUNK_SIZE`) is sufficient -- [ ] 3.2 Consider streaming/incremental processing for sync service messages instead of loading entire 24MB payload -- [ ] 3.3 Add payload size metrics to track large message patterns - -## 4. Circuit breaker pattern -- [x] 4.1 Implement circuit breaker that temporarily disables graph writes after N consecutive failures -- [x] 4.2 Add half-open state that tests recovery with single batch before re-enabling -- [x] 4.3 Log circuit state changes and expose as metric/health check - -## 5. Validation -- [x] 5.1 Deploy to demo namespace and verify no OOMKilled restarts over 24 hours -- [ ] 5.2 Confirm graph data continues to be written during steady-state operation -- [ ] 5.3 Verify metrics expose memory pressure and queue depth accurately diff --git a/openspec/changes/fix-icmp-service-device-attribution/proposal.md b/openspec/changes/fix-icmp-service-device-attribution/proposal.md deleted file mode 100644 index c31a36d2e..000000000 --- a/openspec/changes/fix-icmp-service-device-attribution/proposal.md +++ /dev/null @@ -1,19 +0,0 @@ -# Change: Fix ICMP Collector Attribution for Service Devices - -## Why -ICMP sparklines disappeared for `k8s-agent` in the demo Kubernetes inventory (issue #2069). Investigation revealed two root causes: - -1. **Search planner routing bug**: The device search planner's `supportsRegistry()` function returned `true` for `device_id:` queries, but `executeRegistry()` never implemented device_id filtering. This caused device details pages to return the wrong device (e.g., showing `k8s-poller` when viewing `k8s-agent`). - -2. **SRQL auth misconfiguration**: The Core init script set `.api_key` at root level but not `.srql.api_key`, causing Core's SRQL queries to fail with 401 authentication errors. - -The original hypothesis about agent ICMP attribution was incorrect - the ICMP metrics were already correctly attributed to `serviceradar:agent:k8s-agent` in the database. The issue was that queries couldn't retrieve them. 
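A simplified sketch of the planner fix described here: any query carrying a `device_id:` filter is declared unsupported by the registry engine so it falls through to SRQL. The real `supportsRegistry()` in `pkg/search/planner.go` inspects a parsed query rather than a raw string; this form is illustrative only.

```go
package search

import "strings"

// supportsRegistry reports whether the registry engine can serve a query.
// Queries that filter by device_id are rejected here because the registry
// engine does not implement device_id filtering; rejecting them forces the
// planner onto the SRQL path, which does.
func supportsRegistry(rawQuery string) bool {
	if strings.Contains(rawQuery, "device_id:") {
		return false
	}
	// ... other registry-engine capability checks would remain here ...
	return true
}
```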
- -## What Changes -- **Search planner fix**: Reject `device_id:` queries from the registry engine, forcing them through SRQL which correctly implements device_id filtering (`pkg/search/planner.go`). -- **Auth config fix**: Add `.srql.api_key = $api_key` to the Core init script so SRQL queries authenticate correctly (`helm/serviceradar/files/serviceradar-config.yaml`). -- Verify ICMP data appears in both device inventory (sparklines) and device details (timeline) views. - -## Impact -- Affected specs: `service-device-capabilities` -- Affected code: `pkg/search/planner.go` (supportsRegistry), `helm/serviceradar/files/serviceradar-config.yaml` (init script) diff --git a/openspec/changes/fix-icmp-service-device-attribution/specs/service-device-capabilities/spec.md b/openspec/changes/fix-icmp-service-device-attribution/specs/service-device-capabilities/spec.md deleted file mode 100644 index 2a43ecd68..000000000 --- a/openspec/changes/fix-icmp-service-device-attribution/specs/service-device-capabilities/spec.md +++ /dev/null @@ -1,18 +0,0 @@ -## ADDED Requirements -### Requirement: Device ID queries use exact-match engine -Device search queries containing `device_id:` filters SHALL use an engine that supports exact device ID matching (SRQL), not the registry engine which lacks device ID filtering. - -#### Scenario: Device details page returns correct device -- **WHEN** the frontend requests device details for `serviceradar:agent:k8s-agent` via a `device_id:"serviceradar:agent:k8s-agent"` query -- **THEN** the search planner routes to SRQL (not registry), and the response contains only that device with its correct ICMP metrics and capability data. - -#### Scenario: Registry engine rejects device_id queries -- **WHEN** the search planner evaluates a query containing `device_id:` filter -- **THEN** `supportsRegistry()` returns false, forcing the query through SRQL which correctly implements device_id filtering. - -### Requirement: Core SRQL client authenticates correctly -The Core service's SRQL client SHALL be configured with the API key so SRQL queries authenticate successfully. - -#### Scenario: Core SRQL queries succeed with auth -- **WHEN** the Core service calls SRQL for timeseries metrics or device queries -- **THEN** the SRQL client includes the `X-API-Key` header and receives a successful response (not 401). diff --git a/openspec/changes/fix-icmp-service-device-attribution/tasks.md b/openspec/changes/fix-icmp-service-device-attribution/tasks.md deleted file mode 100644 index f2a224583..000000000 --- a/openspec/changes/fix-icmp-service-device-attribution/tasks.md +++ /dev/null @@ -1,19 +0,0 @@ -## 1. Implementation - -### Completed -- [x] 1.1 Fix search planner to reject `device_id:` queries from registry engine, forcing SRQL path which correctly filters by device_id (`pkg/search/planner.go:322-325`). -- [x] 1.2 Fix Core init script to set `.srql.api_key` so SRQL queries authenticate correctly (`helm/serviceradar/files/serviceradar-config.yaml`). -- [x] 1.3 Deploy fix via Helm upgrade with new image tag `sha-691d182cd47b1ec746b88c5544b64b3699d91e8f`. -- [x] 1.4 Verify SRQL backend returns correct ICMP metrics for `serviceradar:agent:k8s-agent` with fresh timestamps. -- [x] 1.5 Verify device inventory sparklines render ICMP for `k8s-agent` (confirmed `metrics_summary.icmp=true`). -- [x] 1.6 Fix device details ICMP timeline to display ICMP values instead of flattening to zero when units vary (handle ns/ms scaling in `web/src/components/Devices/DeviceDetail.tsx`). 
-- [x] 1.7 Add regression test to ensure `device_id:` queries bypass the registry engine (`pkg/search/planner_test.go`). - -### Pending -- [ ] None (all implementation tasks for this change are currently completed) - -## 2. Investigation Notes -- Original hypothesis (agent ICMP attribution) was incorrect - ICMP metrics were correctly written to `serviceradar:agent:k8s-agent`. -- Actual bugs: (1) registry engine returned wrong device for `device_id:` queries; (2) Core couldn't auth to SRQL. -- Backend is now working: SRQL returns ICMP metrics with timestamps within seconds of query time. -- Frontend issue: Device details page (`DeviceDetail.tsx`) queries `in:timeseries_metrics` with time window but displays stale data. May be CDN caching, web API routing, or frontend parsing issue. diff --git a/openspec/changes/fix-identity-batch-lookup-partition/design.md b/openspec/changes/fix-identity-batch-lookup-partition/design.md new file mode 100644 index 000000000..4a51adf7a --- /dev/null +++ b/openspec/changes/fix-identity-batch-lookup-partition/design.md @@ -0,0 +1,32 @@ +## Context +`IdentityEngine.lookupByStrongIdentifiers` correctly filters identifier lookups by `partition`, but `IdentityEngine.batchLookupByStrongIdentifiers` does not. This creates cross-partition identity assignment when identifier values collide across partitions within the same batch. + +## Goals / Non-Goals +- Goals: + - Guarantee partition isolation for all strong-identifier lookups, including batch paths. + - Preserve batch-mode performance characteristics for single-partition batches. +- Non-Goals: + - Data repair/migration of previously-corrupted device identities (out of scope for this change; handled operationally if needed). + +## Decisions +- Decision: Group updates by partition and run per-partition batch queries. + - Rationale: Minimizes SQL complexity, keeps the lookup API stable (single partition per call), and naturally matches the correctness constraint. + +## Alternatives Considered +- Single query across partitions using composite keys (e.g., `UNNEST(partitions, values)` and joining on both columns). + - Rejected: Higher SQL complexity and more complicated result mapping; can be revisited if per-partition query counts become a bottleneck. +- Disable batch lookup when mixed partitions are present (fall back to single lookups). + - Rejected: Correct but loses the optimization exactly when batch sizes are large and mixed partitions are common. + +## Risks / Trade-offs +- More queries when a batch spans many partitions. + - Mitigation: Partition count per batch is typically small; deduplicate identifier values within each partition and type to reduce query payload. + +## Migration Plan +1. Update DB API + SQL to require partition for batch lookups. +2. Update IdentityEngine to group-by-partition and call the new API. +3. Add regression tests for mixed-partition batches. + +## Open Questions +- Should we add an explicit metric for identifier collisions across partitions to aid detection of multi-tenant environments with common MAC reuse? 
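A minimal sketch of the chosen grouping step, using a trimmed stand-in for the real device update type; each resulting bucket would then feed one partition-scoped call to the batch identifier lookup API:

```go
package registry

// deviceUpdate is a trimmed stand-in for the real update type; only the
// fields needed to illustrate partition grouping are shown.
type deviceUpdate struct {
	Partition string
	MAC       string
}

// groupByPartition buckets updates by partition, defaulting an empty
// partition to "default" so single and batch lookup paths behave the same.
func groupByPartition(updates []*deviceUpdate) map[string][]*deviceUpdate {
	grouped := make(map[string][]*deviceUpdate, len(updates))
	for _, u := range updates {
		p := u.Partition
		if p == "" {
			p = "default"
		}
		grouped[p] = append(grouped[p], u)
	}
	return grouped
}
```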
+ diff --git a/openspec/changes/fix-identity-batch-lookup-partition/proposal.md b/openspec/changes/fix-identity-batch-lookup-partition/proposal.md new file mode 100644 index 000000000..684344263 --- /dev/null +++ b/openspec/changes/fix-identity-batch-lookup-partition/proposal.md @@ -0,0 +1,22 @@ +# Change: Fix partition-scoped batch identifier lookup + +## Why +GitHub issue `#2140` reports that `IdentityEngine.batchLookupByStrongIdentifiers` performs batch identifier lookups without filtering by `partition`. In a multi-tenant deployment, two partitions can legitimately contain the same identifier value (e.g., MAC address). When updates from different partitions land in the same batch, the current implementation can assign the first-matched `device_id` to all updates, violating partition isolation and silently corrupting device identity. + +This is a security-relevant correctness issue: cross-partition identity assignment can leak inventory and telemetry between tenants and break the integrity of the canonical device model. + +## What Changes +- Add a `partition` parameter to the DB batch lookup API and filter the underlying SQL query by partition. +- Update `IdentityEngine.batchLookupByStrongIdentifiers` to preserve correctness when a batch contains multiple partitions by grouping updates by partition and performing per-partition batch lookups. +- Add regression tests to ensure mixed-partition batches resolve to partition-correct device IDs (no cross-partition matches). + +## Impact +- Affected specs: `device-identity-reconciliation` +- Affected code: + - `pkg/registry/identity_engine.go` + - `pkg/db/interfaces.go` + - `pkg/db/cnpg_identity_engine.go` + - `pkg/db/mock_db.go` + - `pkg/registry/*_test.go` (new regression coverage) +- Behavior change: only for mixed-partition batches with colliding identifier values; fixes prior incorrect cross-partition assignment. + diff --git a/openspec/changes/fix-identity-batch-lookup-partition/specs/device-identity-reconciliation/spec.md b/openspec/changes/fix-identity-batch-lookup-partition/specs/device-identity-reconciliation/spec.md new file mode 100644 index 000000000..1aa9b576a --- /dev/null +++ b/openspec/changes/fix-identity-batch-lookup-partition/specs/device-identity-reconciliation/spec.md @@ -0,0 +1,13 @@ +## ADDED Requirements + +### Requirement: Partition-Scoped Batch Identifier Lookup +The system MUST resolve strong identifiers in batch mode within the update's partition, and MUST NOT match identifiers across partitions. + +#### Scenario: Same identifier in different partitions +- **WHEN** two device updates in the same batch share the same strong identifier value but have different partitions +- **THEN** each update resolves to the device ID that matches its own partition + +#### Scenario: Empty partition defaults consistently +- **WHEN** a device update has an empty partition value +- **THEN** identifier resolution treats it as partition `default` for both single and batch lookup paths + diff --git a/openspec/changes/fix-identity-batch-lookup-partition/tasks.md b/openspec/changes/fix-identity-batch-lookup-partition/tasks.md new file mode 100644 index 000000000..6de959302 --- /dev/null +++ b/openspec/changes/fix-identity-batch-lookup-partition/tasks.md @@ -0,0 +1,17 @@ +## 1. Database Layer +- [x] 1.1 Update `BatchGetDeviceIDsByIdentifier` to accept a `partition` parameter +- [x] 1.2 Add `AND partition = $3` to `batchGetDeviceIDsByIdentifierSQL` +- [x] 1.3 Update DB interface and gomock mocks/callers for the new signature + +## 2. 
Identity Engine +- [x] 2.1 Group batch updates by partition (defaulting empty to `default`) +- [x] 2.2 For each partition: batch query strong identifiers by type using the partition-aware DB API +- [x] 2.3 Map identifier hits back to updates using the existing strong-ID priority order + +## 3. Tests +- [x] 3.1 Add regression test: two partitions with the same MAC in the same batch resolve to different device IDs +- [x] 3.2 Update existing tests/mocks that expect `BatchGetDeviceIDsByIdentifier` calls +- [x] 3.3 Run `go test ./pkg/registry/...` and `go test ./pkg/db/...` + +## 4. Validation +- [x] 4.1 Run `openspec validate fix-identity-batch-lookup-partition --strict` diff --git a/openspec/changes/fix-mcp-srql-sql-injection/design.md b/openspec/changes/fix-mcp-srql-sql-injection/design.md new file mode 100644 index 000000000..73dcc6902 --- /dev/null +++ b/openspec/changes/fix-mcp-srql-sql-injection/design.md @@ -0,0 +1,30 @@ +## Context +The MCP server exposes intent-based tools that construct SRQL queries from structured parameters. Several tools/query builders currently build filters using `fmt.Sprintf("field = '%s'", userValue)`, which can be broken by unescaped quotes and allows SRQL/SQL injection via crafted input. + +## Goals / Non-Goals +- Goals: + - Ensure structured scalar parameters are always treated as bound values (not SRQL fragments). + - Apply the fix consistently across all MCP tools and shared query builders. + - Add tests that would fail if future changes reintroduce unsafe interpolation. +- Non-Goals: + - Redesign SRQL syntax/grammar beyond what MCP needs. + - Introduce new external dependencies or a new security framework. + - Change authn/authz or disable tools that intentionally accept raw SRQL. + +## Decisions +- Decision: Require parameterized SRQL execution for structured scalar tool parameters. + - Rationale: Quoting/escaping at the SRQL text layer is easy to miss in new code. Binding parameters makes the “value vs expression” boundary explicit and testable. + - Result: MCP query builders use SRQL placeholders (for example `$1`, `$2`, ...) and pass parameter values out-of-band to the query executor. +- Decision: Treat raw SRQL inputs as pass-through. + - Rationale: Some tools explicitly accept raw SRQL input (for example `filter` fields or `srql.query`). Altering or binding values inside those strings would change semantics and can introduce placeholder collisions. + +## Risks / Trade-offs +- Risk: Parameter support depends on the configured query executor implementation. + - Mitigation: Make structured tools require a parameter-capable executor; add tests that assert parameter binding is used for those tool paths. + +## Migration Plan +- No storage migration. +- Implementation consists of code-only changes in `pkg/mcp` plus tests; roll out as a patch release. + +## Open Questions +- Should MCP tools that accept free-form SRQL be restricted or disabled by default to preserve “intent-based” security boundaries? diff --git a/openspec/changes/fix-mcp-srql-sql-injection/proposal.md b/openspec/changes/fix-mcp-srql-sql-injection/proposal.md new file mode 100644 index 000000000..3ac95fcf4 --- /dev/null +++ b/openspec/changes/fix-mcp-srql-sql-injection/proposal.md @@ -0,0 +1,15 @@ +# Change: Harden MCP SRQL query construction against injection + +## Why +GitHub issue #2142 reports that the MCP server constructs SRQL queries by interpolating user-supplied parameters into quoted strings (for example `device_id = '%s'`). 
Crafted inputs containing quotes and operators can break out of the intended string literal and alter the query semantics. + +## What Changes +- Use parameterized SRQL execution for structured scalar parameters so user input is bound as parameters rather than concatenated into SRQL text. +- Update MCP tools and shared query builders to treat identifier parameters as opaque values (not SRQL fragments). +- Add regression tests that prove injection payloads do not widen filters or change the structure of generated SRQL queries. + +## Impact +- Affected specs: `mcp` (new) +- Affected code: `pkg/mcp/tools_devices.go`, `pkg/mcp/server.go`, `pkg/mcp/query_utils.go`, `pkg/mcp/builder.go`, `pkg/mcp/tools_logs.go`, `pkg/mcp/tools_events.go`, `pkg/mcp/tools_sweeps.go` +- Compatibility: Behavior should remain the same for normal identifiers; inputs containing quotes no longer alter the WHERE clause structure. MCP deployments MUST provide a query executor that supports parameter binding for structured tools. +- Out of scope: Changing authorization or availability of tools that accept free-form SRQL (for example `srql.query`). diff --git a/openspec/changes/fix-mcp-srql-sql-injection/specs/mcp/spec.md b/openspec/changes/fix-mcp-srql-sql-injection/specs/mcp/spec.md new file mode 100644 index 000000000..8fde7a5d6 --- /dev/null +++ b/openspec/changes/fix-mcp-srql-sql-injection/specs/mcp/spec.md @@ -0,0 +1,40 @@ +## ADDED Requirements + +### Requirement: MCP tool parameters are quoted as SRQL literals +The MCP server MUST treat structured tool parameters that represent scalar string values (for example identifiers, names, and timestamps) as bound values when constructing SRQL queries, and MUST NOT concatenate raw parameter text into SRQL fragments. + +#### Scenario: Device ID input cannot widen a query +- **GIVEN** a `devices.getDevice` request with `device_id` containing quotes and operators (for example `device' OR '1'='1`) +- **WHEN** the MCP server constructs the SRQL query +- **THEN** the query compares `device_id` to a single bound value representing the entire input value +- **AND** the query structure is not modified by the input (no additional boolean conditions are introduced) + +#### Scenario: Poller ID input cannot escape its filter +- **GIVEN** a request that filters by `poller_id` via a structured parameter (not a raw SRQL filter string) +- **WHEN** the MCP server constructs the SRQL query +- **THEN** `poller_id` is represented as a bound value and cannot terminate or extend the filter expression + +### Requirement: MCP uses centralized parameter binding for scalar values +The MCP server MUST implement and use a single internal parameter binding mechanism for structured scalar values across all tools and shared query builders. + +#### Scenario: Consistent quoting across tools +- **GIVEN** multiple MCP tools that include scalar string parameters in constructed SRQL queries +- **WHEN** each tool constructs its SRQL query +- **THEN** all scalar string parameters are bound using the same internal mechanism + +### Requirement: Free-form SRQL is explicitly opt-in +Tools that accept free-form SRQL strings MUST explicitly label the parameter as raw SRQL input (for example `query` or `filter`) and MUST document that the value is passed through. Tools that accept structured scalar parameters MUST NOT interpret those parameters as SRQL fragments. 
+ +#### Scenario: Structured parameters are not treated as raw SRQL +- **GIVEN** a tool that accepts `device_id` or `poller_id` as a structured parameter +- **WHEN** the parameter contains SRQL operators +- **THEN** the parameter is treated as a literal value, not parsed as SRQL syntax + +### Requirement: MCP has regression tests for injection payloads +The MCP codebase MUST include tests that assert SRQL generated from structured tool parameters is safe against quote-based injection payloads. + +#### Scenario: Regression tests detect unsafe interpolation +- **GIVEN** a known injection payload containing quotes and boolean operators +- **WHEN** the query builder is exercised in a test +- **THEN** the produced SRQL query binds the payload as a single parameter value +- **AND** the test fails if the payload alters the query structure diff --git a/openspec/changes/fix-mcp-srql-sql-injection/tasks.md b/openspec/changes/fix-mcp-srql-sql-injection/tasks.md new file mode 100644 index 000000000..27da33866 --- /dev/null +++ b/openspec/changes/fix-mcp-srql-sql-injection/tasks.md @@ -0,0 +1,18 @@ +# Tasks: Harden MCP SRQL query construction against injection + +## 1. Audit and centralize quoting +### 1.1 Inventory unsafe interpolation +- [x] 1.1 Inventory all MCP SRQL builders that interpolate user-controlled string parameters (tools, query utils, generic filter builder). + +### 1.2 Add parameterized execution support +- [x] 1.2 Introduce a parameter-capable executor interface (in addition to the existing one) and a helper to execute SRQL with bound params. + +## 2. Apply fixes across MCP +- [x] 2.1 Update `devices.getDevice` and `executeGetDevice` to use bound parameters for `device_id` (no raw interpolation). +- [x] 2.2 Update shared query builders to bind `poller_id`, `device_type`, `status`, and timestamp parameters where they are treated as values. +- [x] 2.3 Update the generic filter builder to bind all mapped string fields as values. + +## 3. Tests and validation +- [x] 3.1 Add unit tests covering representative injection payloads (quotes, OR, comments) to assert generated queries remain well-formed and structurally unchanged. +- [x] 3.2 Run `openspec validate fix-mcp-srql-sql-injection --strict`. +- [x] 3.3 Run `gofmt` and targeted `go test` for `pkg/mcp` changes. diff --git a/openspec/changes/fix-pgx-batch-error-handling/design.md b/openspec/changes/fix-pgx-batch-error-handling/design.md new file mode 100644 index 000000000..71ff1b4ee --- /dev/null +++ b/openspec/changes/fix-pgx-batch-error-handling/design.md @@ -0,0 +1,39 @@ +## Context +`pgx.SendBatch` requires callers to read results for each queued command to reliably surface per-command errors. Several ServiceRadar DB write paths only call `BatchResults.Close()`, which can discard insert errors and hide partial write failures. + +The codebase already contains correct examples (for example, `sendCNPG` in `pkg/db/cnpg_metrics.go` and `sendCNPGBatch` in `pkg/db/cnpg_device_updates_retry.go`) that call `br.Exec()` for every queued command before closing. + +## Goals / Non-Goals +- Goals: + - Ensure every CNPG batch write path consumes results for each queued command. + - Surface INSERT failures to callers (or log them explicitly for best-effort paths). + - Reduce copy/paste batch handling logic to avoid reintroducing the bug. +- Non-Goals: + - Change SQL behavior, schemas, or add transactional semantics. + - Add new dependencies unless required for testing. 
+ +## Decisions +- Decision: Add a small helper in `pkg/db` that implements the standard pattern: + - `br := executor.SendBatch(ctx, batch)` + - `for i := 0; i < batch.Len(); i++ { br.Exec() }` + - `br.Close()` in a `defer` to ensure cleanup even on early return + - Include `operation name` and `command index` in errors + +## Alternatives Considered +- Inline fixes at each call site: + - Pros: minimal new code + - Cons: easy to miss future uses; inconsistency across files + +## Risks / Trade-offs +- Risk: Surfacing errors may expose previously hidden data problems (bad inputs, constraint issues) and cause callers to retry or fail where they previously succeeded. +- Trade-off: Slight additional CPU per batch to read results, but this is required for correctness and is already the established pattern elsewhere in the codebase. + +## Migration Plan +1. Add helper + tests. +2. Update the affected call sites (starting with `InsertEvents` and `StoreBatchUsers`). +3. Audit remaining `SendBatch` usages and migrate them. + +## Open Questions +- For best-effort audit/log batches (for example, device deletion audit trails), should the system: + - Continue and only log errors (current behavior), or + - Fail the higher-level operation when the audit record cannot be written? diff --git a/openspec/changes/fix-pgx-batch-error-handling/proposal.md b/openspec/changes/fix-pgx-batch-error-handling/proposal.md new file mode 100644 index 000000000..c0e19ddc5 --- /dev/null +++ b/openspec/changes/fix-pgx-batch-error-handling/proposal.md @@ -0,0 +1,19 @@ +# Change: Fix pgx batch error handling for CNPG writes + +## Why +Issue #2153 identifies multiple CNPG write paths that use `pgx.SendBatch` and then immediately call `BatchResults.Close()` without reading results via `BatchResults.Exec()`. This can silently discard per-statement INSERT errors, causing undetected data loss (for example, dropped CloudEvents or missing user rows) while callers proceed as if the write succeeded. + +## What Changes +- Update CNPG batch write call sites to always read each queued batch result (`br.Exec()`) before closing (`br.Close()`), returning the first encountered error with context (operation name + command index). +- Introduce a small shared helper for "exec all + close" to keep batch handling consistent and reduce the risk of future regressions. +- Audit existing `SendBatch` usages and bring them onto the same pattern, including paths that currently log-and-continue so they still drain results and can log the real insert error. + +## Impact +- Affected specs: `db-batch-writes` (new) +- Affected code (expected): + - `pkg/db/events.go` (`InsertEvents`) + - `pkg/db/auth.go` (`StoreBatchUsers`) + - `pkg/db/cnpg_unified_devices.go` (`DeleteDevices` audit batch) + - Other `pkg/db/**` `SendBatch` call sites discovered during audit +- Behavior change: previously hidden DB write failures will now return errors to callers (or be logged explicitly for "best-effort" audit batches). +- Risk: Low. This change only affects error surfacing and batch result consumption; it does not change SQL statements or schemas. 
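+
+As a non-normative sketch only (assuming pgx v5 import paths and a hypothetical `execBatch` name; the real helper's package location and signature may differ), the shared "exec all + close" pattern could look like:
+
+```go
+package db
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/jackc/pgx/v5"
+)
+
+// batchSender is the minimal surface needed from a pool, connection, or transaction.
+type batchSender interface {
+	SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults
+}
+
+// execBatch sends the batch, reads one result per queued command so that
+// per-command errors are surfaced, and always closes the results.
+func execBatch(ctx context.Context, sender batchSender, batch *pgx.Batch, operation string) (err error) {
+	br := sender.SendBatch(ctx, batch)
+
+	defer func() {
+		// Always close; keep the close error only when nothing failed earlier.
+		if cerr := br.Close(); cerr != nil && err == nil {
+			err = fmt.Errorf("%s: closing batch results: %w", operation, cerr)
+		}
+	}()
+
+	for i := 0; i < batch.Len(); i++ {
+		if _, execErr := br.Exec(); execErr != nil {
+			return fmt.Errorf("%s: batch command %d failed: %w", operation, i, execErr)
+		}
+	}
+
+	return nil
+}
+```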
diff --git a/openspec/changes/fix-pgx-batch-error-handling/specs/db-batch-writes/spec.md b/openspec/changes/fix-pgx-batch-error-handling/specs/db-batch-writes/spec.md new file mode 100644 index 000000000..8705be112 --- /dev/null +++ b/openspec/changes/fix-pgx-batch-error-handling/specs/db-batch-writes/spec.md @@ -0,0 +1,23 @@ +## ADDED Requirements + +### Requirement: CNPG batch writes surface per-command errors +The system SHALL consume results for every queued CNPG batch command and MUST surface the first per-command write error to the caller for non-best-effort write paths. + +Batch write error messages MUST include sufficient context to identify which operation failed and which command in the batch produced the error. + +#### Scenario: CloudEvent batch insert returns a constraint violation +- **WHEN** `InsertEvents` queues multiple INSERT commands and one command fails due to a database constraint violation +- **THEN** `InsertEvents` returns an error +- **AND** the error includes the failing batch command index and operation context + +#### Scenario: User batch insert returns an insert error +- **WHEN** `StoreBatchUsers` queues multiple INSERT commands and one command fails due to invalid data +- **THEN** `StoreBatchUsers` returns an error +- **AND** the error includes the failing batch command index and operation context + +### Requirement: Batch results are always closed +The system MUST always close `pgx.BatchResults` after sending a batch, even when a per-command error occurs while reading results. + +#### Scenario: Batch results close on early error +- **WHEN** a per-command error is detected while reading batch results +- **THEN** the system closes the batch results before returning diff --git a/openspec/changes/fix-pgx-batch-error-handling/tasks.md b/openspec/changes/fix-pgx-batch-error-handling/tasks.md new file mode 100644 index 000000000..0e3f13a53 --- /dev/null +++ b/openspec/changes/fix-pgx-batch-error-handling/tasks.md @@ -0,0 +1,13 @@ +## 1. Batch Helper +- [x] 1.1 Add a shared helper in `pkg/db/` to execute all queued batch commands (`br.Exec()` for `batch.Len()` items) and always close the results (`br.Close()`), preserving close errors when no prior error exists. +- [x] 1.2 Add unit tests for the helper with a fake `pgx.BatchResults` that can inject an error at a specific command index. + +## 2. Fix Affected Call Sites +- [x] 2.1 Update `pkg/db/events.go` `InsertEvents` to use the helper and return an error when any queued INSERT fails. +- [x] 2.2 Update `pkg/db/auth.go` `StoreBatchUsers` to use the helper and return an error when any queued INSERT fails. +- [x] 2.3 Update `pkg/db/cnpg_unified_devices.go` `DeleteDevices` audit batch to drain results and log the real insert error(s) (best-effort behavior remains). +- [x] 2.4 Audit the remaining `SendBatch` usages under `pkg/db/**` and either migrate them to the helper or document why they intentionally do not surface errors. + +## 3. Verification +- [x] 3.1 Run `go test ./pkg/db/...` (and any targeted tests added by this change). +- [x] 3.2 Confirm `openspec validate fix-pgx-batch-error-handling --strict` passes. 
diff --git a/openspec/changes/fix-poller-status-metadata-clobber/design.md b/openspec/changes/fix-poller-status-metadata-clobber/design.md new file mode 100644 index 000000000..fa820caac --- /dev/null +++ b/openspec/changes/fix-poller-status-metadata-clobber/design.md @@ -0,0 +1,43 @@ +## Context +Poller registration metadata (identity/provenance) and poller operational status (health/last-seen) currently share the same `pollers` row in CNPG. Two code paths write that table: +- Service registry registration/upsert (intended to manage registration metadata). +- Poller status updates (intended to manage operational state). + +Today, the poller status UPSERT overwrites registration columns with defaults during conflict updates, which corrupts explicit registrations. + +## Goals / Non-Goals +- Goals: + - Preserve poller registration metadata across status/heartbeat updates. + - Keep status updates cheap (no read-before-write required). + - Maintain a clear “ownership” boundary between registration writes and status writes. +- Non-Goals: + - Redesign the registry schema (separating operational state into a new table). + - Change the edge onboarding data model or token formats. + +## Decisions +- Decision: Treat status/heartbeat writes as *operational-only* updates. + - The status update SQL MUST NOT modify registration metadata on conflict. + - Registration metadata remains managed by the service registry registration/upsert path. + +### Alternatives considered +- Conditional UPSERT for each column (`CASE`/`COALESCE(NULLIF(...))`). + - Pros: single SQL path. + - Cons: ambiguous semantics (cannot distinguish “intentionally set empty” vs “unknown”), increases complexity, easy to regress. +- Read-before-write to preserve fields in application code. + - Pros: explicit. + - Cons: adds a read on hot path and still cannot preserve fields that are not represented in the status model. +- Route all heartbeats through the service registry exclusively. + - Pros: single write system. + - Cons: would require expanding service registry heartbeat semantics to include `is_healthy`, and coordinating ownership across packages. + +## Risks / Trade-offs +- Risk: Call sites that *intended* to update registration metadata via `UpdatePollerStatus` will no longer do so. + - Mitigation: Audit call sites and keep registration metadata updates explicit via the service registry. + +## Migration Plan +No data migration. Existing corrupted rows may require re-registration to restore missing metadata; the change prevents future corruption. + +## Open Questions +- Should `UpdatePollerStatus` be renamed/split to make the “operational-only” semantics impossible to misuse? +- Should we add an integrity check/alert when a poller’s registration metadata changes unexpectedly? + diff --git a/openspec/changes/fix-poller-status-metadata-clobber/proposal.md b/openspec/changes/fix-poller-status-metadata-clobber/proposal.md new file mode 100644 index 000000000..05116b740 --- /dev/null +++ b/openspec/changes/fix-poller-status-metadata-clobber/proposal.md @@ -0,0 +1,20 @@ +# Change: Prevent poller status updates from clobbering registration metadata + +## Why +`UpdatePollerStatus` is called frequently to record operational state (`is_healthy`, `last_seen`) for a poller. 
Today, that write path overwrites poller registration metadata (`component_id`, `registration_source`, `status`, `spiffe_identity`, `metadata`, and related timestamps) with hardcoded defaults, causing pollers registered via edge onboarding or explicit registration to lose their identity and provenance. + +Reference: GitHub issue `#2151` (Poller status updates overwrite registration metadata with defaults). + +## What Changes +- Update the CNPG poller status UPSERT so that conflict updates only touch operational fields (e.g., `last_seen`, `is_healthy`, `updated_at`) and do **not** overwrite registration metadata. +- Define/clarify write ownership: registration metadata updates flow through the service registry registration path; status/heartbeat updates flow through the poller status path. +- Add regression coverage to prevent reintroducing metadata clobbering. + +## Impact +- Affected specs: `service-registry` (new delta) +- Affected code: + - `pkg/db/cnpg_registry.go` (poller status UPSERT + args) + - `pkg/db/pollers.go` (public DB API semantics) + - `pkg/core/pollers.go` and any other callers that update poller health/last-seen +- No schema changes expected; this is a behavioral fix for write semantics. + diff --git a/openspec/changes/fix-poller-status-metadata-clobber/specs/service-registry/spec.md b/openspec/changes/fix-poller-status-metadata-clobber/specs/service-registry/spec.md new file mode 100644 index 000000000..72b76f266 --- /dev/null +++ b/openspec/changes/fix-poller-status-metadata-clobber/specs/service-registry/spec.md @@ -0,0 +1,27 @@ +# Service Registry: Poller status vs registration writes + +## ADDED Requirements + +### Requirement: Poller status updates preserve registration metadata +When the system records poller operational state (e.g., `is_healthy`, `last_seen`), it SHALL NOT overwrite poller registration metadata (`component_id`, `registration_source`, `status`, `spiffe_identity`, `metadata`, `created_by`, `first_registered`, and `first_seen`). + +#### Scenario: Explicitly registered poller retains metadata after status update +- **GIVEN** poller `edge-poller-01` is explicitly registered with non-default `component_id`, `registration_source`, `spiffe_identity`, and `metadata` +- **WHEN** the poller reports status updates that update `last_seen` and/or `is_healthy` +- **THEN** the stored registration metadata remains unchanged +- **AND** only operational fields (such as `last_seen`, `is_healthy`, `updated_at`) are updated + +#### Scenario: Status updates do not clear first-seen timestamps +- **GIVEN** poller `edge-poller-01` has a non-null `first_registered` and `first_seen` +- **WHEN** a status update occurs where the caller does not provide `first_seen` +- **THEN** `first_registered` and `first_seen` remain unchanged + +### Requirement: Registration writes remain authoritative for registration metadata +When the system performs an explicit poller registration or metadata update (e.g., edge onboarding), it SHALL update poller registration metadata, and that metadata SHALL NOT be overwritten by subsequent status/heartbeat writes.
+ +#### Scenario: Explicit registration after implicit status insert updates metadata +- **GIVEN** poller `edge-poller-01` first appears via an implicit status insert (defaults applied) +- **WHEN** an explicit registration is later performed with non-default identity/provenance metadata +- **THEN** the poller record reflects the explicit registration metadata after registration completes +- **AND** subsequent status updates preserve those values + diff --git a/openspec/changes/fix-poller-status-metadata-clobber/tasks.md b/openspec/changes/fix-poller-status-metadata-clobber/tasks.md new file mode 100644 index 000000000..01cddcb05 --- /dev/null +++ b/openspec/changes/fix-poller-status-metadata-clobber/tasks.md @@ -0,0 +1,10 @@ +## 1. Implementation +- [x] 1.1 Update `pkg/db/cnpg_registry.go:upsertPollerStatusSQL` conflict clause to only update operational columns and preserve registration metadata. +- [x] 1.2 Ensure `first_registered` / `first_seen` are not cleared or overwritten by status-only calls (especially when callers omit `FirstSeen`). +- [x] 1.3 Add regression tests that fail if poller registration metadata changes after a status update (explicit registration → status update). +- [x] 1.4 Audit `UpdatePollerStatus` call sites in `pkg/core/**` (and elsewhere) to confirm they are intended to be status/heartbeat updates only. +- [x] 1.5 Confirm service registry registration/upsert paths continue to update registration metadata as intended. + +## 2. Validation +- [x] 2.1 Run `openspec validate fix-poller-status-metadata-clobber --strict`. +- [x] 2.2 Run targeted Go tests for the touched packages (at minimum `go test ./pkg/db/... ./pkg/core/...`). diff --git a/openspec/changes/fix-rbac-route-protection-wildcard-fallback/design.md b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/design.md new file mode 100644 index 000000000..ae5669eec --- /dev/null +++ b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/design.md @@ -0,0 +1,29 @@ +## Context +`pkg/core/auth/rbac.go:getRequiredRoles` determines which RBAC roles are required for a given HTTP path + method using the `rbac.route_protection` config. The current implementation returns immediately on an exact path match, even when that exact match is a method-specific map that does not include the requested method. This causes wildcard protections (for example `/api/admin/*`) to be skipped. + +## Goals / Non-Goals +- Goals: + - Prevent method-specific exact matches from bypassing wildcard protections when the method is not explicitly protected by the exact match. + - Preserve the ability for method-specific exact matches to override wildcard protections when the method is explicitly listed. + - Add tests that fail if this bypass reappears. +- Non-Goals: + - Redesign the RBAC configuration schema. + - Change the middleware semantics where “no required roles” means “no role check”. + - Define or implement deterministic precedence between multiple overlapping wildcard patterns (unless required by follow-up work). + +## Decisions +- Decision: Treat an exact path protection as applicable only if it yields at least one role for the requested method. + - Rationale: Method-specific maps are intended to scope protection to listed methods. Returning an empty role list should not disable more general protections that still apply. + - Result: `getRequiredRoles` will compute roles from the exact match and return them only when non-empty; otherwise it will continue evaluating wildcard patterns. 
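+
+A rough, non-normative sketch of this decision (the `routeEntry` type and matching helpers below are placeholders; the real configuration types and `getRequiredRoles` implementation live in `pkg/core/auth` and differ in detail):
+
+```go
+package auth
+
+import "strings"
+
+// routeEntry mirrors the two shapes a route_protection entry may take:
+// a plain role list (applies to all methods) or a per-method map.
+type routeEntry struct {
+	AllMethods []string
+	ByMethod   map[string][]string
+}
+
+func (e routeEntry) rolesFor(method string) []string {
+	if len(e.AllMethods) > 0 {
+		return e.AllMethods
+	}
+	return e.ByMethod[method]
+}
+
+// getRequiredRoles resolves the roles required for path+method. An exact path
+// entry wins only when it yields roles for the requested method; otherwise
+// wildcard patterns such as "/api/admin/*" are still evaluated.
+func getRequiredRoles(protection map[string]routeEntry, path, method string) []string {
+	if entry, ok := protection[path]; ok {
+		if roles := entry.rolesFor(method); len(roles) > 0 {
+			return roles // method-specific exact match overrides wildcards
+		}
+		// Exact entry defines nothing for this method: fall through to
+		// wildcards instead of returning an empty (unprotected) role list.
+	}
+
+	for pattern, entry := range protection {
+		if !strings.HasSuffix(pattern, "/*") {
+			continue
+		}
+		if strings.HasPrefix(path, strings.TrimSuffix(pattern, "*")) {
+			if roles := entry.rolesFor(method); len(roles) > 0 {
+				return roles // precedence between overlapping wildcards is out of scope here
+			}
+		}
+	}
+
+	return nil // no matching protection; middleware applies no role check
+}
+```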
+ +## Risks / Trade-offs +- Risk: Some deployments may have relied (unknowingly) on the bypass for access patterns. + - Mitigation: Treat this as a security bug fix; document the change and add tests demonstrating the intended behavior. + +## Migration Plan +- No storage migration. +- Code-only change in `pkg/core/auth`; roll out as a patch release after validation. + +## Open Questions +- Should wildcard precedence be deterministic when multiple patterns match (for example, prefer the most-specific prefix match)? +- Should method maps support an explicit “default/any method” key (for example `"*"`), and how should it interact with wildcards? diff --git a/openspec/changes/fix-rbac-route-protection-wildcard-fallback/proposal.md b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/proposal.md new file mode 100644 index 000000000..110e137c0 --- /dev/null +++ b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/proposal.md @@ -0,0 +1,19 @@ +# Change: Fix RBAC route protection wildcard fallback for method-specific exact matches + +## Why +GitHub issue #2147 (https://github.com/carverauto/serviceradar/issues/2147) reports a security bug in `pkg/core/auth/rbac.go:getRequiredRoles`: when an exact-path `route_protection` entry exists but does not define roles for the requested HTTP method, the code returns an empty role list and bypasses wildcard protections (for example `/api/admin/*`). This can unintentionally grant access to protected routes for authenticated users without the expected roles. + +## What Changes +- Update RBAC route role resolution so exact path matches only take precedence when they yield required roles for the requested HTTP method; otherwise wildcard protections are still evaluated. +- Add regression tests covering exact-match + method-map + wildcard combinations to prevent future bypasses. +- Document the intended precedence rules for `route_protection` entries (exact vs wildcard; method-specific vs array). + +## Impact +- Affected specs: `rbac-route-protection` (new) +- Affected code: `pkg/core/auth/rbac.go`, `pkg/core/auth/*_test.go` (new/updated) +- Compatibility: This is a security-hardening bug fix. Configurations that unintentionally relied on the bypass (exact match missing method allowing access) will become more restrictive. +- Out of scope: Changing the `route_protection` schema, changing the meaning of “empty required roles” beyond the wildcard fallback behavior, or introducing deterministic precedence between multiple overlapping wildcard patterns. + +## Success Criteria +- A request to a route matched by a wildcard protection does not become unprotected just because an exact path entry exists for the same route without roles for that method. +- Unit tests reproduce the bypass case from issue #2147 and pass with the fix. 
diff --git a/openspec/changes/fix-rbac-route-protection-wildcard-fallback/specs/rbac-route-protection/spec.md b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/specs/rbac-route-protection/spec.md new file mode 100644 index 000000000..4e8d6dc31 --- /dev/null +++ b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/specs/rbac-route-protection/spec.md @@ -0,0 +1,28 @@ +## ADDED Requirements + +### Requirement: Route protection falls back from exact method maps to wildcard patterns +When `rbac.route_protection` contains both wildcard patterns (for example `/api/admin/*`) and an exact path entry for a concrete route, the core API MUST NOT bypass wildcard protections solely due to the presence of the exact path entry. If the exact path entry is a method-specific map and does not define roles for the requested HTTP method, the core API MUST continue evaluating wildcard patterns and apply any matching wildcard protection. + +#### Scenario: Exact match missing method falls back to wildcard protection +- **GIVEN** `rbac.route_protection` includes `/api/admin/*: ["admin"]` +- **AND** `rbac.route_protection` includes an exact entry for `/api/admin/users` with method-specific roles that only define `POST: ["superadmin"]` +- **WHEN** a request is made to `GET /api/admin/users` +- **THEN** the required roles include `admin` (from the wildcard protection) + +### Requirement: Method-specific exact matches override wildcard protection when defined +When an exact path entry defines roles for the requested HTTP method, those roles MUST be used in preference to roles from wildcard protections. + +#### Scenario: Exact match method roles override wildcard roles +- **GIVEN** `rbac.route_protection` includes `/api/admin/*: ["admin"]` +- **AND** `rbac.route_protection` includes an exact entry for `/api/admin/users` with method-specific roles that define `POST: ["superadmin"]` +- **WHEN** a request is made to `POST /api/admin/users` +- **THEN** the required roles include `superadmin` +- **AND** the required roles do not fall back to `admin` for that request + +### Requirement: RBAC includes regression tests for route protection resolution +The core RBAC implementation MUST include unit tests that cover precedence and fallback behavior between exact path entries and wildcard patterns in `rbac.route_protection`. + +#### Scenario: Regression tests detect wildcard bypass +- **GIVEN** a test configuration that includes a wildcard protection and an exact method map that does not define the requested method +- **WHEN** the route protection resolution is exercised by tests +- **THEN** tests fail if required roles are empty when a matching wildcard protection exists diff --git a/openspec/changes/fix-rbac-route-protection-wildcard-fallback/tasks.md b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/tasks.md new file mode 100644 index 000000000..58ef85b65 --- /dev/null +++ b/openspec/changes/fix-rbac-route-protection-wildcard-fallback/tasks.md @@ -0,0 +1,15 @@ +# Tasks: Fix RBAC route protection wildcard fallback for method-specific exact matches + +## 1. Define expected behavior +- [x] 1.1 Capture precedence rules for `route_protection` (exact match vs wildcard; array vs method map). +- [x] 1.2 Identify affected configs (core.json, Helm config) and confirm they are compatible with the corrected behavior. + +## 2. Implement the fix +- [x] 2.1 Update `getRequiredRoles` to fall back to wildcard matches when an exact match yields no roles for the requested method. 
+- [x] 2.2 Ensure the fix preserves method-specific overrides when roles are defined for the method. + +## 3. Tests and validation +- [x] 3.1 Add unit tests reproducing the bypass scenario from GitHub issue #2147. +- [x] 3.2 Add unit tests proving method-specific exact matches still override wildcard roles for the same path/method. +- [x] 3.3 Run `gofmt` and targeted `go test ./pkg/core/auth`. +- [x] 3.4 Run `openspec validate fix-rbac-route-protection-wildcard-fallback --strict`. diff --git a/openspec/changes/fix-registry-ip-canonicalization/proposal.md b/openspec/changes/fix-registry-ip-canonicalization/proposal.md deleted file mode 100644 index c85121a87..000000000 --- a/openspec/changes/fix-registry-ip-canonicalization/proposal.md +++ /dev/null @@ -1,144 +0,0 @@ -# Change: Fix IP conflict resolution to respect Armis device identity - -## Why - -Demo inventory collapsed to **362 devices out of 50,002**. Investigation revealed the root cause is **not** missing tombstone filtering (that filtering already exists), but rather that **IP conflict resolution ignores strong identity (Armis ID)**, causing distinct devices to be incorrectly merged when IPs are reassigned. - -### The Real Problem: IP Churn Causes Wrong Merges - -Our source system (Armis) polls devices at regular intervals. Due to DHCP and IP churn, devices occasionally change IP addresses. The `armis_device_id` is stable and should be the authoritative identifier—when we detect an IP change, we should simply update our record with the new IP. - -**Current behavior (WRONG):** - -When `resolveIPConflictsWithDB()` at `registry.go:522` detects an IP conflict: -``` -DB state: sr:AAA (armis_id=X, IP=10.0.0.1) -Incoming: sr:BBB (armis_id=Y, IP=10.0.0.1) ← different Armis device! -``` - -The code: -1. Sees IP conflict: "10.0.0.1 already belongs to sr:AAA" -2. **Tombstones sr:BBB → sr:AAA** (WRONG—these are different devices!) -3. Merges B's metadata (including `armis_id=Y`) into sr:AAA - -**Result:** -- sr:AAA now has conflicting Armis IDs (corrupted identity) -- sr:BBB (the real device Y) is tombstoned to the wrong device -- Device Y is effectively lost from inventory - -### IP Churn Scenario - -``` -T=0: Armis poll - Device X (armis_id=X) has IP=10.0.0.1 → sr:AAA - Device Y (armis_id=Y) has IP=10.0.0.2 → sr:BBB - -T=1: IP churn (DHCP reassignment) - Device X now has IP=10.0.0.2 - Device Y now has IP=10.0.0.1 - -T=2: Next Armis poll arrives - Device X (armis_id=X, IP=10.0.0.2) - should update sr:AAA - Device Y (armis_id=Y, IP=10.0.0.1) - should update sr:BBB - - But sr:AAA still has IP=10.0.0.1 in DB! - - If Y is processed before X's IP update: - → IP conflict detected for 10.0.0.1 - → sr:BBB tombstoned to sr:AAA (WRONG!) - → Device Y lost, sr:AAA corrupted with Y's identity -``` - -### Why Tombstone Cascade Happens - -1. Device Y (sr:BBB) gets tombstoned to Device X (sr:AAA) — wrong merge -2. Next poll: Y's update tries to resolve `armis_id=Y`, but sr:BBB is tombstoned -3. System generates new sr:CCC for Y -4. IP conflict again with whoever currently has Y's IP -5. sr:CCC tombstoned to another wrong device -6. Repeat until inventory collapses - -This explains: -- **49,641 rows with `_merged_into` set** — cascading wrong merges -- **49,510 tombstones pointing to non-existent targets** — targets themselves got tombstoned -- **`_merged_into` cycles** — devices merged in both directions due to IP swaps - -### Previous Analysis Was Incorrect - -The original proposal claimed "Registry IP resolution code does not filter out tombstoned rows." 
This is **no longer accurate**: - -- `unifiedDevicesSelection` at `pkg/db/cnpg_unified_devices.go:54-56` already filters tombstones -- All identity resolution queries (`queryDeviceIDsByMAC`, `queryDeviceIDsByArmisID`, etc.) include tombstone filtering -- `resolveCanonicalIPs()` explicitly checks `isCanonicalUnifiedDevice()` - -The tombstone filtering is working correctly. The problem is **upstream**: wrong merges are being created in the first place. - -## What Changes - -### 1. IP conflict resolution must respect strong identity - -Modify `resolveIPConflictsWithDB()` at `registry.go:465-585` to check Armis ID before merging: - -```go -// Current (WRONG): -if existingDeviceID, exists := existingByIP[update.IP]; exists && existingDeviceID != update.DeviceID { - // Tombstone new device to existing ← WRONG if different Armis IDs! -} - -// Fixed: -if existingDeviceID, exists := existingByIP[update.IP]; exists && existingDeviceID != update.DeviceID { - existingArmisID := getArmisID(existingDevice) - updateArmisID := getArmisID(update) - - if existingArmisID != "" && updateArmisID != "" && existingArmisID != updateArmisID { - // DIFFERENT Armis devices sharing IP due to churn - // The existing device's IP is stale—clear it, let new device have the IP - emitIPClearUpdate(existingDeviceID) - // Process update normally (no tombstone) - } else { - // Same device or no strong identity—existing merge logic OK - } -} -``` - -### 2. Batch deduplication must respect strong identity - -Apply same logic to `deduplicateBatch()` at `registry.go:389-463`: -- When two updates in the same batch have the same IP but different Armis IDs, they are **different devices** -- Only the most recent update (by timestamp or batch order) should claim the IP -- The other device should have its IP cleared, not be tombstoned - -### 3. Identity-first resolution order - -Ensure `DeviceIdentityResolver.ResolveDeviceIDs()` resolves by Armis ID **before** any IP-based conflict resolution runs, so devices are correctly identified before IP deduplication. - -### 4. Add diagnostic logging - -Log when IP conflict resolution encounters different Armis IDs to help diagnose IP churn patterns: -``` -level=info msg="IP reassignment detected" ip=10.0.0.1 old_device=sr:AAA old_armis=X new_device=sr:BBB new_armis=Y -``` - -## Impact - -- **Affected specs**: device-identity-reconciliation -- **Affected code**: - - `pkg/registry/registry.go`: `resolveIPConflictsWithDB()`, `deduplicateBatch()` - - `pkg/registry/device_identity.go`: Ensure Armis ID resolution happens first -- **Risk**: Medium — changes IP conflict handling logic, but targeted to check strong identity before merging - -## Data Model Clarification - -For our use case: -- **Armis ID** is the stable, authoritative device identifier from the source system -- **ServiceRadar ID** (`sr:...`) is our internal stable identifier, mapped 1:1 from Armis ID -- **IP address** is a mutable attribute that can change due to DHCP/churn -- **Tombstones** should only be created when migrating legacy IDs to `sr:` format, NOT for IP conflicts between distinct devices - -## Data Repair - -After deploying the fix: -1. Identify devices with multiple/conflicting Armis IDs in metadata (corruption from wrong merges) -2. Clear `_merged_into` on devices that were incorrectly tombstoned -3. Re-poll from Armis to restore correct device inventory -4. 
Alternatively: reseed faker data for clean slate on demo diff --git a/openspec/changes/fix-registry-ip-canonicalization/specs/device-identity-reconciliation/spec.md b/openspec/changes/fix-registry-ip-canonicalization/specs/device-identity-reconciliation/spec.md deleted file mode 100644 index 745f6a888..000000000 --- a/openspec/changes/fix-registry-ip-canonicalization/specs/device-identity-reconciliation/spec.md +++ /dev/null @@ -1,81 +0,0 @@ -## ADDED Requirements - -### Requirement: Strong identity (Armis ID) takes precedence over IP in conflict resolution - -The registry SHALL use Armis ID as the authoritative device identifier. When an IP conflict occurs between two devices with different Armis IDs, the system SHALL NOT merge/tombstone either device but instead reassign the IP to the device that currently owns it according to the source system. - -#### Scenario: IP reassignment between different Armis devices -- **GIVEN** `unified_devices` contains device sr:AAA with `armis_device_id=X` and `IP=10.0.0.1` -- **AND** an update arrives for device sr:BBB with `armis_device_id=Y` and `IP=10.0.0.1` -- **WHEN** the registry processes the update -- **THEN** sr:BBB is NOT tombstoned to sr:AAA (they are different devices) -- **AND** sr:AAA's IP is cleared (its IP is now stale) -- **AND** sr:BBB receives IP=10.0.0.1 -- **AND** both devices remain canonical in inventory - -#### Scenario: Same Armis device with changed IP -- **GIVEN** `unified_devices` contains device sr:AAA with `armis_device_id=X` and `IP=10.0.0.1` -- **AND** an update arrives with `armis_device_id=X` and `IP=10.0.0.2` -- **WHEN** the registry processes the update -- **THEN** sr:AAA's IP is updated to 10.0.0.2 -- **AND** no tombstone is created -- **AND** the device remains canonical - -#### Scenario: Duplicate update for same device (same IP, same Armis ID) -- **GIVEN** `unified_devices` contains device sr:AAA with `armis_device_id=X` and `IP=10.0.0.1` -- **AND** an update arrives with `armis_device_id=X` and `IP=10.0.0.1` -- **WHEN** the registry processes the update -- **THEN** sr:AAA is updated (last_seen, metadata, etc.) -- **AND** no new device is created -- **AND** no tombstone is created - -### Requirement: IP conflicts within a batch respect strong identity - -When multiple updates in the same batch have the same IP but different Armis IDs, the system SHALL treat them as distinct devices and resolve the IP ownership based on the source system's current state, not batch ordering. 
- -#### Scenario: Intra-batch IP conflict with different Armis IDs -- **GIVEN** a batch contains: - - Update A: `armis_device_id=X`, `IP=10.0.0.1`, `timestamp=T1` - - Update B: `armis_device_id=Y`, `IP=10.0.0.1`, `timestamp=T2` (T2 > T1) -- **WHEN** the registry processes the batch -- **THEN** device Y receives IP=10.0.0.1 (most recent timestamp) -- **AND** device X has its IP cleared (not tombstoned) -- **AND** both devices remain in inventory as canonical - -#### Scenario: Intra-batch IP conflict with same Armis ID -- **GIVEN** a batch contains duplicate updates for the same device: - - Update A: `armis_device_id=X`, `IP=10.0.0.1`, `timestamp=T1` - - Update B: `armis_device_id=X`, `IP=10.0.0.1`, `timestamp=T2` -- **WHEN** the registry processes the batch -- **THEN** only one device record exists (deduplicated by Armis ID) -- **AND** the most recent update's data is used - -### Requirement: Tombstones are only for ID migration, not IP conflicts - -The system SHALL only create tombstones (`_merged_into`) when migrating a device from a legacy ID format to a ServiceRadar UUID. Tombstones SHALL NOT be created due to IP address conflicts between distinct devices. - -#### Scenario: Legacy ID migration creates tombstone -- **GIVEN** a device exists with legacy ID `default:10.0.0.1` -- **AND** an update arrives with `armis_device_id=X` resolving to `sr:AAA` -- **WHEN** the registry processes the update -- **THEN** a tombstone is created: `default:10.0.0.1` → `sr:AAA` -- **AND** future lookups for the legacy ID resolve to sr:AAA - -#### Scenario: IP conflict does NOT create tombstone -- **GIVEN** device sr:AAA has IP=10.0.0.1 -- **AND** device sr:BBB (different Armis ID) arrives with IP=10.0.0.1 -- **WHEN** the registry processes the update -- **THEN** no tombstone is created -- **AND** both sr:AAA and sr:BBB remain canonical devices - -### Requirement: IP can be cleared without tombstoning - -The system SHALL support clearing a device's IP address (marking it as stale) without tombstoning the device. This allows IP addresses to be reassigned between devices during DHCP churn. - -#### Scenario: IP cleared from device with stale IP -- **GIVEN** device sr:AAA has IP=10.0.0.1 but Armis reports a different device now has that IP -- **WHEN** the registry clears sr:AAA's IP -- **THEN** sr:AAA remains a canonical device in inventory -- **AND** sr:AAA's IP field is empty or marked as stale -- **AND** sr:AAA can receive a new IP in a future update -- **AND** the IP=10.0.0.1 is available for assignment to another device diff --git a/openspec/changes/fix-registry-ip-canonicalization/tasks.md b/openspec/changes/fix-registry-ip-canonicalization/tasks.md deleted file mode 100644 index 16ce24bba..000000000 --- a/openspec/changes/fix-registry-ip-canonicalization/tasks.md +++ /dev/null @@ -1,43 +0,0 @@ -## 1. 
Implementation - -### 1.1 IP Conflict Resolution with Strong Identity Check -- [x] Modify `resolveIPConflictsWithDB()` in `pkg/registry/registry.go:465-585` to check strong identity (Armis ID, Netbox ID, MAC) before tombstoning: - - Fetch existing device's full record when IP conflict detected - - If both devices have different strong identities: clear IP from existing device instead of tombstoning new device - - If same identity or no strong identity: use existing merge logic -- [x] Add helper function `getStrongIdentity(update *models.DeviceUpdate)` and `getStrongIdentityFromDevice(device *models.UnifiedDevice)` to fetch authoritative IDs - -### 1.2 Batch Deduplication with Strong Identity Check -- [x] Modify `deduplicateBatch()` in `pkg/registry/registry.go:389-463` to respect strong identity: - - When IP collision detected within batch, compare strong identities - - If different identities: newer update claims IP, emit IP-clear update for older device (not tombstone) - - If same identity: existing first-wins logic is correct (same device, duplicate update) - -### 1.3 IP Clear Update Mechanism -- [x] Implement IP clearing by issuing a DeviceUpdate with `IP: "0.0.0.0"` and `_ip_cleared_due_to_churn` metadata. - - Allows the device to remain canonical but release the IP for reassignment - - Does NOT set `_merged_into` (device is not being merged, just losing its stale IP) - -### 1.4 Diagnostic Logging -- [x] Add info-level logging when IP reassignment is detected between different devices: - ``` - level=info msg="IP reassignment detected in batch (strong identity mismatch)" ip=10.0.0.1 old_device=sr:AAA old_identity=X new_device=sr:BBB new_identity=Y - ``` - -## 2. Testing - -- [x] Add unit test: Two devices with different Armis IDs, same IP → no tombstone, IP cleared from old device -- [x] Add unit test: `TestDeduplicateBatchMergesStrongIdentityByIP` updated to verify IP clearing on mismatch -- [x] Add unit test: Two devices with same Armis ID, same IP → merge is correct (duplicate update) -- [x] Add unit test: Device with no Armis ID vs device with Armis ID, same IP → merge is correct (weak upgraded to strong) - -## 3. Data Repair / Verification - -- [ ] Write SQL query to identify devices with multiple/conflicting `armis_device_id` values in metadata -- [ ] Write SQL query to find tombstones where source and target have different Armis IDs (incorrect merges) -- [ ] Document cleanup procedure: - 1. Clear `_merged_into` on incorrectly tombstoned devices - 2. Deduplicate devices that were created after their original was tombstoned - 3. Re-poll from Armis to restore correct state -- [ ] Validate on demo: device inventory returns to ~50k devices after fix + cleanup -- [ ] Alternative: document reseed procedure for faker data on demo namespace diff --git a/openspec/changes/fix-service-device-sightings/proposal.md b/openspec/changes/fix-service-device-sightings/proposal.md deleted file mode 100644 index 998d8c234..000000000 --- a/openspec/changes/fix-service-device-sightings/proposal.md +++ /dev/null @@ -1,14 +0,0 @@ -# Change: Fix Service Device Sightings and Identity Reporting - -## Why -Pollers and agents are being demoted to sightings because identity reconciliation treats their self-reported updates as weak signals, and source_ip often arrives as `auto`/empty. Their IP/hostname and partition data never stick, so promoted devices fall back into the sightings list instead of staying in inventory. 
- -## What Changes -- Treat ServiceRadar service updates (poller/agent/checker and host self-registration) as authoritative devices, bypassing the sighting ingest path. -- Ensure pollers/agents send normalized source_ip + hostname and core fills missing values from pod/host metadata so devices carry real identity data. -- Align partition/identity metadata so service components land in the default partition with stable IDs rather than ambiguous `Serviceradar` sightings. -- Add regression tests to cover the service-device path while identity reconciliation is enabled. - -## Impact -- Affected specs: `device-identity-reconciliation` -- Affected code: registry ProcessBatchDeviceUpdates/hasStrongIdentity, poller source IP resolution and registration, service device partition handling, identity tests/UI around sightings vs devices. diff --git a/openspec/changes/fix-service-device-sightings/specs/device-identity-reconciliation/spec.md b/openspec/changes/fix-service-device-sightings/specs/device-identity-reconciliation/spec.md deleted file mode 100644 index c046cf266..000000000 --- a/openspec/changes/fix-service-device-sightings/specs/device-identity-reconciliation/spec.md +++ /dev/null @@ -1,14 +0,0 @@ -## ADDED Requirements -### Requirement: Service Components Remain Devices -Self-reported ServiceRadar components (pollers, agents, checkers, host registrations) SHALL be treated as authoritative device updates and SHALL bypass network sighting demotion so they remain in inventory with stable ServiceRadar IDs. - -#### Scenario: Poller status ingested under identity reconciliation -- **WHEN** a poller or agent reports status with its service identifiers while identity reconciliation is enabled -- **THEN** the registry records or refreshes the corresponding service device directly (skipping sighting ingest) so it stays visible as a device instead of reappearing as a sighting - -### Requirement: Service Components Report Normalized Host Identity -ServiceRadar components SHALL send normalized source IPs and hostnames for their own hosts, and the system SHALL resolve missing values from runtime or service-registry metadata so service devices carry IP and hostname data in the default partition. - -#### Scenario: Source IP is missing or a placeholder -- **WHEN** a poller status arrives with an empty or placeholder source_ip -- **THEN** the system resolves a concrete IP and hostname (for example the pod or node address), registers the host/device in the poller partition (default), and the resulting device entry shows the resolved IP/hostname rather than an empty or `Serviceradar` sighting diff --git a/openspec/changes/fix-service-device-sightings/tasks.md b/openspec/changes/fix-service-device-sightings/tasks.md deleted file mode 100644 index 5e79d8c68..000000000 --- a/openspec/changes/fix-service-device-sightings/tasks.md +++ /dev/null @@ -1,17 +0,0 @@ -## 1. Investigation and plan -- [x] 1.1 Capture current poller/agent status payloads and registry behavior with identity reconciliation enabled to confirm sighting demotion and missing IP/hostname. (Covered via registry/poller unit tests asserting service updates bypass sightings and IPs/hostnames get resolved.) -- [x] 1.2 Decide partition and identity strategy for service devices (host device vs ServiceRadar IDs) and expected visibility in the default partition. (Decision: keep `serviceradar:` device IDs but explicitly place service devices in the `default` partition field; fall back to stored poller status for missing IPs.) - -## 2. 
Fixes -- [x] 2.1 Update the registry so ServiceRadar service updates and self-reported host registrations bypass sighting ingest and stay as authoritative devices; add unit tests. -- [x] 2.2 Harden poller/agent source IP + hostname resolution (use concrete IPv4/pod IP, normalize, propagate) so device updates carry real identity data; add tests. -- [x] 2.3 Align partition metadata for service components so they appear in the default partition without creating duplicate IDs; add regression coverage. - -## 3. Validation -- [x] 3.1 Add/refresh tests covering poller/agent → registry identity flow under identity reconciliation (no service sightings, IP/hostname present). -- [x] 3.2 Run `openspec validate fix-service-device-sightings --strict` and address any spec lint issues. - -## 4. Deployment and verification (in progress) -- [ ] 4.1 Deploy latest ICMP/poller/agent identity fixes to demo with images built from `32db2915ca79741bf551ad8af98d1ce359ce46f8` (core `sha-88d3a8af915b...`, web `sha-ea0415aa1069...`, poller `sha-bccc4567ef2a...`, agent `sha-9c92617fce5f...`, datasvc `sha-bdc0057ce88c...`, sync `sha-022d570f8aeb...`, snmp-checker `sha-b32c3d1c9923...`, srql `sha-1a10f7b7285...`, tools `sha-36d2645dd65...`). -- [ ] 4.2 Verify in demo UI/inventory that ICMP capability is attached only to the agent device (no poller ICMP sparkline) and that the poller is marked available when reporting. -- [ ] 4.3 Resolve Helm pre-upgrade hook failure: `serviceradar-secret-generator` job currently `ImagePullBackOff` in `demo`, leaving the last upgrade partially applied and pods still on prior (8h-old) images. diff --git a/openspec/changes/fix-snmp-check-deadlock/proposal.md b/openspec/changes/fix-snmp-check-deadlock/proposal.md new file mode 100644 index 000000000..853d10e58 --- /dev/null +++ b/openspec/changes/fix-snmp-check-deadlock/proposal.md @@ -0,0 +1,16 @@ +# Change: Fix SNMP checker health check deadlock + +## Why +GitHub issue `#2141` reports a deadlock in the SNMP checker service caused by recursive `sync.RWMutex` read locking: `SNMPService.Check()` acquires `RLock()` and then calls `GetStatus()`, which also calls `RLock()`. If a writer is waiting (e.g., `handleDataPoint()` attempting `Lock()`), Go’s write-preferring `RWMutex` blocks new readers, so the nested `RLock()` blocks indefinitely while still holding the outer `RLock()`. This can hang health checks and make the SNMP checker unresponsive. + +## What Changes +- Update `SNMPService.Check()` and/or `SNMPService.GetStatus()` so health checks cannot deadlock under concurrent datapoint updates. +- Add regression test coverage that reproduces the deadlock scenario and verifies the fix. + +## Impact +- Affected specs: `snmp-checker` +- Affected code: + - `pkg/checker/snmp/service.go` + - `pkg/checker/snmp/*_test.go` +- Risk: low; change is localized to lock usage in health/status paths. + diff --git a/openspec/changes/fix-snmp-check-deadlock/specs/snmp-checker/spec.md b/openspec/changes/fix-snmp-check-deadlock/specs/snmp-checker/spec.md new file mode 100644 index 000000000..fb250a400 --- /dev/null +++ b/openspec/changes/fix-snmp-check-deadlock/specs/snmp-checker/spec.md @@ -0,0 +1,10 @@ +## ADDED Requirements + +### Requirement: SNMP checker health checks are deadlock-free +The SNMP checker service MUST allow `Check()` and `GetStatus()` to execute concurrently with datapoint processing without deadlocking. 
+ +#### Scenario: Health check during concurrent datapoint updates +- **GIVEN** the SNMP checker is processing datapoints (updating internal status) +- **WHEN** a health check calls `Check()` concurrently with datapoint updates +- **THEN** `Check()` returns a result without blocking indefinitely + diff --git a/openspec/changes/fix-snmp-check-deadlock/tasks.md b/openspec/changes/fix-snmp-check-deadlock/tasks.md new file mode 100644 index 000000000..2ce3ef2db --- /dev/null +++ b/openspec/changes/fix-snmp-check-deadlock/tasks.md @@ -0,0 +1,12 @@ +## 1. Fix +- [x] 1.1 Remove recursive `RWMutex` read locking in `SNMPService.Check()` (do not call `GetStatus()` while holding `RLock()`, or refactor to share a locked snapshot) +- [x] 1.2 Ensure status snapshot returned by `Check()` remains thread-safe and consistent with `GetStatus()` output expectations + +## 2. Tests +- [x] 2.1 Add a regression test that prevents `Check()` from combining `s.mu.RLock()` with a call to `GetStatus()` (static AST assertion) +- [x] 2.2 Ensure the regression test fails on the pre-fix implementation and passes post-fix + +## 3. Validation +- [x] 3.1 Run `go test ./pkg/checker/snmp/...` +- [x] 3.2 Run `make lint` +- [x] 3.3 Run `openspec validate fix-snmp-check-deadlock --strict` diff --git a/openspec/changes/fix-snmp-pdu-string-conversion/proposal.md b/openspec/changes/fix-snmp-pdu-string-conversion/proposal.md new file mode 100644 index 000000000..5b2efc177 --- /dev/null +++ b/openspec/changes/fix-snmp-pdu-string-conversion/proposal.md @@ -0,0 +1,17 @@ +# Change: Fix SNMP PDU string conversion panics + +## Why +GitHub issue `#2154` reports the SNMP client conversion layer panicking when decoding `gosnmp.SnmpPDU` values for `OctetString` and `ObjectDescription`. The gosnmp library returns these values as `[]byte`, but `pkg/checker/snmp/client.go` currently asserts `byte`, causing `interface conversion: interface {} is []uint8, not uint8` panics. This breaks common OIDs like `sysDescr` and can crash SNMP polling in normal configurations. + +## What Changes +- Fix `OctetString` and `ObjectDescription` conversions to correctly decode `[]byte` into Go `string`. +- Ensure unexpected `Value` types for these ASN.1 types do not panic; conversion MUST fail with an error that callers can handle. +- Add regression tests covering `convertVariable()` for `OctetString` and `ObjectDescription` (and the non-panicking error case). + +## Impact +- Affected specs: `snmp-checker` +- Affected code: + - `pkg/checker/snmp/client.go` + - `pkg/checker/snmp/*_test.go` +- Risk: low; change is localized to SNMP value decoding and is covered by focused regression tests. + diff --git a/openspec/changes/fix-snmp-pdu-string-conversion/specs/snmp-checker/spec.md b/openspec/changes/fix-snmp-pdu-string-conversion/specs/snmp-checker/spec.md new file mode 100644 index 000000000..5ad1214cc --- /dev/null +++ b/openspec/changes/fix-snmp-pdu-string-conversion/specs/snmp-checker/spec.md @@ -0,0 +1,20 @@ +## ADDED Requirements + +### Requirement: SNMP PDU string types convert without panics +The SNMP checker MUST convert gosnmp PDU values of type `OctetString` and `ObjectDescription` into Go strings without panicking. 
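+
+A non-normative sketch of the corrected decoding (the function name is a placeholder for the real `convertVariable` logic):
+
+```go
+package snmp
+
+import (
+	"fmt"
+
+	"github.com/gosnmp/gosnmp"
+)
+
+// convertStringPDU decodes string-typed PDU values. gosnmp returns
+// OctetString/ObjectDescription values as []byte, so assert []byte (not byte)
+// and return an error instead of panicking on unexpected value types.
+func convertStringPDU(v gosnmp.SnmpPDU) (string, error) {
+	switch v.Type {
+	case gosnmp.OctetString, gosnmp.ObjectDescription:
+		b, ok := v.Value.([]byte)
+		if !ok {
+			return "", fmt.Errorf("unexpected value type %T for %s", v.Value, v.Name)
+		}
+
+		return string(b), nil
+	default:
+		return "", fmt.Errorf("unsupported ASN.1 type %v for %s", v.Type, v.Name)
+	}
+}
+```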
+ +#### Scenario: Convert OctetString value returned as bytes +- **GIVEN** an SNMP response variable with Type `OctetString` and Value `[]byte("Test SNMP String")` +- **WHEN** the SNMP client converts the variable +- **THEN** the conversion result is the string `"Test SNMP String"` and conversion returns no error + +#### Scenario: Convert ObjectDescription value returned as bytes +- **GIVEN** an SNMP response variable with Type `ObjectDescription` and Value `[]byte("Device OS v1.2.3")` +- **WHEN** the SNMP client converts the variable +- **THEN** the conversion result is the string `"Device OS v1.2.3"` and conversion returns no error + +#### Scenario: Unexpected value type does not crash the checker +- **GIVEN** an SNMP response variable with Type `OctetString` or `ObjectDescription` and a Value that is not a `[]byte` +- **WHEN** the SNMP client converts the variable +- **THEN** conversion returns an error and the checker does not panic + diff --git a/openspec/changes/fix-snmp-pdu-string-conversion/tasks.md b/openspec/changes/fix-snmp-pdu-string-conversion/tasks.md new file mode 100644 index 000000000..53e2b683c --- /dev/null +++ b/openspec/changes/fix-snmp-pdu-string-conversion/tasks.md @@ -0,0 +1,13 @@ +## 1. Fix +- [x] 1.1 Update SNMP PDU conversion for `gosnmp.OctetString` and `gosnmp.ObjectDescription` to decode `[]byte` into `string` without panicking +- [x] 1.2 Ensure conversion returns an error (not a panic) when `variable.Value` is not the expected Go type for `OctetString`/`ObjectDescription` + +## 2. Tests +- [x] 2.1 Add a regression test for `convertVariable()` converting `OctetString` values returned as `[]byte` +- [x] 2.2 Add a regression test for `convertVariable()` converting `ObjectDescription` values returned as `[]byte` +- [x] 2.3 Add a regression test that an unexpected `Value` type for these string ASN.1 types returns an error and does not panic + +## 3. Validation +- [x] 3.1 Run `go test ./pkg/checker/snmp/...` +- [x] 3.2 Run `make lint` +- [x] 3.3 Run `openspec validate fix-snmp-pdu-string-conversion --strict` diff --git a/openspec/changes/fix-srql-filter-error-handling/proposal.md b/openspec/changes/fix-srql-filter-error-handling/proposal.md deleted file mode 100644 index 502ced3b9..000000000 --- a/openspec/changes/fix-srql-filter-error-handling/proposal.md +++ /dev/null @@ -1,39 +0,0 @@ -## Why -- Nine query modules in the SRQL translator silently ignore unknown filter fields via `_ => {}` catch-all match arms, while six other modules correctly return `ServiceError::InvalidRequest`. -- When a user mistypes a query filter (e.g., `in:events severty:error` instead of `severity:error`), the API silently drops the invalid filter and returns *all* rows matching other criteria, which can leak sensitive data or produce confusing results. -- Inconsistent error handling violates the principle of least surprise and makes debugging user queries harder—some endpoints fail fast while others fail silently. - -## What Changes -1. Replace the `_ => {}` catch-all in the `apply_filter` function of each affected query module with an explicit error return that names the unsupported field, matching the pattern used in `devices.rs` and `events.rs`. -2. Add unit tests for each fixed module asserting that queries with unknown filter fields return a 400-level error rather than silently succeeding. -3. Update the SRQL language reference documentation to clarify supported fields per entity, so users know which filters are valid. 
- -### Affected Files -| File | Line | Current Behavior | -|------|------|------------------| -| `rust/srql/src/query/logs.rs` | 235 | Silent ignore | -| `rust/srql/src/query/timeseries_metrics.rs` | 120 | Silent ignore | -| `rust/srql/src/query/cpu_metrics.rs` | 191 | Silent ignore | -| `rust/srql/src/query/traces.rs` | 111 | Silent ignore | -| `rust/srql/src/query/services.rs` | 105 | Silent ignore | -| `rust/srql/src/query/otel_metrics.rs` | 145 | Silent ignore | -| `rust/srql/src/query/pollers.rs` | 103 | Silent ignore | -| `rust/srql/src/query/memory_metrics.rs` | 128 | Silent ignore | -| `rust/srql/src/query/disk_metrics.rs` | 133 | Silent ignore | - -### Reference Implementation -Files with correct error handling that should be used as reference: -- `rust/srql/src/query/devices.rs:175-179` -- `rust/srql/src/query/events.rs:120-124` -- `rust/srql/src/query/interfaces.rs:214-216` -- `rust/srql/src/query/device_graph.rs:94-96` -- `rust/srql/src/query/device_updates.rs:120-122` - -## Impact -- **Breaking Change (by design):** Queries that previously succeeded silently with invalid filters will now return HTTP 400 errors. This is the correct behavior and surfaces bugs in client code or user queries that were previously hidden. -- No schema changes or migrations required. -- Minimal code changes—each fix is a 3-4 line replacement of the catch-all arm. -- Test coverage additions ensure the fix does not regress. - -## Related -- GitHub Issue: #2049 diff --git a/openspec/changes/fix-srql-filter-error-handling/specs/srql/spec.md b/openspec/changes/fix-srql-filter-error-handling/specs/srql/spec.md deleted file mode 100644 index 4fe144220..000000000 --- a/openspec/changes/fix-srql-filter-error-handling/specs/srql/spec.md +++ /dev/null @@ -1,70 +0,0 @@ -## ADDED Requirements - -### Requirement: SRQL filter validation rejects unknown fields with explicit errors -All SRQL query modules MUST reject unknown filter field names with an explicit `InvalidRequest` error that names the unsupported field, rather than silently ignoring the filter and returning unfiltered results. - -#### Scenario: Logs query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `logs` entity with a filter on a non-existent field (e.g., `severty:error`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'severty'` and does not return any log rows. - -#### Scenario: Traces query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `traces` entity with a filter on a non-existent field (e.g., `spn_id:abc`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'spn_id'` and does not return any trace rows. - -#### Scenario: Services query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `services` entity with a filter on a non-existent field (e.g., `svc_type:http`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'svc_type'` and does not return any service rows. 
- -#### Scenario: Pollers query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `pollers` entity with a filter on a non-existent field (e.g., `poller_name:main`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'poller_name'` and does not return any poller rows. - -#### Scenario: CPU metrics query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `cpu_metrics` entity with a filter on a non-existent field (e.g., `cpu_usage:high`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'cpu_usage'` and does not return any CPU metric rows. - -#### Scenario: Memory metrics query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `memory_metrics` entity with a filter on a non-existent field (e.g., `mem_usage:high`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'mem_usage'` and does not return any memory metric rows. - -#### Scenario: Disk metrics query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `disk_metrics` entity with a filter on a non-existent field (e.g., `disk_usage:high`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'disk_usage'` and does not return any disk metric rows. - -#### Scenario: OTEL metrics query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `otel_metrics` entity with a filter on a non-existent field (e.g., `metric_unit:ms`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'metric_unit'` and does not return any OTEL metric rows. - -#### Scenario: Timeseries metrics query rejects unknown filter field -- **GIVEN** an SRQL query targeting the `timeseries_metrics` entity with a filter on a non-existent field (e.g., `ts_value:100`) -- **WHEN** the query is executed via `/api/query` -- **THEN** the API returns HTTP 400 with an error message containing `unsupported filter field 'ts_value'` and does not return any timeseries metric rows. - -### Requirement: Error messages identify the invalid field and entity context -All filter validation errors MUST include both the unsupported field name and the entity being queried so users can quickly identify and correct their query syntax. - -#### Scenario: Error message includes entity context -- **GIVEN** an SRQL query `in:logs unknown_field:value` -- **WHEN** the query is executed -- **THEN** the error message includes context indicating the field is unsupported for logs queries, e.g., `unsupported filter field for logs: 'unknown_field'`. - -### Requirement: Valid filter fields continue to function correctly -Fixing the unknown field rejection MUST NOT affect the behavior of valid filter fields; all documented filters MUST continue to work as specified. - -#### Scenario: Valid logs filters still work -- **GIVEN** an SRQL query `in:logs severity_text:error service_name:core` -- **WHEN** the query is executed -- **THEN** the query returns only log rows matching both filter conditions. 
- -#### Scenario: Valid traces filters still work -- **GIVEN** an SRQL query `in:traces span_id:abc123 trace_id:def456` -- **WHEN** the query is executed -- **THEN** the query returns only trace rows matching both filter conditions. diff --git a/openspec/changes/fix-srql-filter-error-handling/tasks.md b/openspec/changes/fix-srql-filter-error-handling/tasks.md deleted file mode 100644 index 23152b418..000000000 --- a/openspec/changes/fix-srql-filter-error-handling/tasks.md +++ /dev/null @@ -1,33 +0,0 @@ -## 1. Fix silent filter handling in query modules -- [x] 1.1 Replace `_ => {}` with error return in `logs.rs:235` matching the pattern in `devices.rs` -- [x] 1.2 Replace `_ => {}` with error return in `timeseries_metrics.rs:120` -- [x] 1.3 Replace `_ => {}` with error return in `cpu_metrics.rs:191` -- [x] 1.4 Replace `_ => {}` with error return in `traces.rs:111` -- [x] 1.5 Replace `_ => {}` with error return in `services.rs:105` -- [x] 1.6 Replace `_ => {}` with error return in `otel_metrics.rs:145` -- [x] 1.7 Replace `_ => {}` with error return in `pollers.rs:103` -- [x] 1.8 Replace `_ => {}` with error return in `memory_metrics.rs:128` -- [x] 1.9 Replace `_ => {}` with error return in `disk_metrics.rs:133` -- [x] 1.10 Replace `_ => Ok(None)` with error return in `logs.rs` `build_stats_filter_clause` -- [x] 1.11 Replace `_ => return Ok(None)` with error return in `otel_metrics.rs` `build_stats_filter_clause` - -## 2. Add regression tests -- [x] 2.1 Add unit test in `logs.rs` asserting unknown filter field returns error -- [x] 2.2 Add unit test in `logs.rs` asserting unknown stats filter field returns error -- [x] 2.3 Add unit test in `traces.rs` asserting unknown filter field returns error -- [x] 2.4 Add unit test in `services.rs` asserting unknown filter field returns error -- [x] 2.5 Add unit test in `pollers.rs` asserting unknown filter field returns error -- [x] 2.6 Add unit test in `cpu_metrics.rs` asserting unknown filter field returns error -- [x] 2.7 Add unit test in `memory_metrics.rs` asserting unknown filter field returns error -- [x] 2.8 Add unit test in `disk_metrics.rs` asserting unknown filter field returns error -- [x] 2.9 Add unit test in `timeseries_metrics.rs` asserting unknown filter field returns error -- [x] 2.10 Add unit test in `otel_metrics.rs` asserting unknown filter field returns error - -## 3. Verification and cleanup -- [x] 3.1 Run `cargo clippy -p srql` and fix any new warnings -- [x] 3.2 Run `cargo test -p srql` to verify all existing tests still pass (36 tests passed) -- [x] 3.3 Verify no other query modules have `_ => {}` catch-all patterns for filter handling - -## 4. Documentation -- [x] 4.1 Update `docs/docs/srql-language-reference.md` with supported filter fields per entity -- [x] 4.2 Add "Unsupported filter field" to error handling section with reference to filter fields documentation diff --git a/openspec/changes/fix-sweeper-summary-shallow-copy/proposal.md b/openspec/changes/fix-sweeper-summary-shallow-copy/proposal.md new file mode 100644 index 000000000..61dd8aaa1 --- /dev/null +++ b/openspec/changes/fix-sweeper-summary-shallow-copy/proposal.md @@ -0,0 +1,28 @@ +# Change: Fix sweeper summary shallow-copy data races + +## Why +Issue #2148 reports data races in `pkg/sweeper` summary collection/streaming caused by shallow-copying `models.HostResult` (`*host`) while holding shard locks, then returning those copies after releasing locks. 
Because `HostResult` contains pointer/slice/map fields (`PortResults`, `PortMap`, `ICMPStatus`), shallow copies retain references to the underlying shared data, allowing concurrent reads/writes once the lock is released. + +This is observable via `go test -race` and can lead to undefined behavior, crashes, or corrupted summary data under concurrent `Process()` + summary access. + +## What Changes +- Introduce a shared deep-copy helper for `models.HostResult` that produces a non-aliased snapshot (including `PortResults`, `PortMap`, and `ICMPStatus`). +- Update summary collection and streaming paths in `pkg/sweeper/base_processor.go` to use deep copies before releasing shard locks. +- Update `pkg/sweeper/memory_store.go` host result conversions to use the same deep-copy helper for consistency and to prevent accidental aliasing as the code evolves. +- Add regression tests (including a race-detector test derived from issue #2148) to ensure summaries are safe under concurrent reads/writes. + +## Impact +- Affected specs: sweeper +- Affected code: + - `pkg/models/sweep.go` (new deep-copy helper adjacent to `HostResult`) + - `pkg/sweeper/base_processor.go` (deep-copy in summary collection/stream) + - `pkg/sweeper/memory_store.go` (deep-copy in host slice conversion) + - `pkg/sweeper/*_test.go` (new regression + race tests) +- Risk: Low (purely defensive copying); primary risk is increased CPU/memory during summary retrieval for large host sets. + +## Trade-offs +- Deep copying increases per-summary allocation proportional to host/port cardinality, but avoids unsafe aliasing and makes summary consumption lock-free and race-free. +- Alternatives considered: + - Holding locks while callers consume data (not viable; would leak internal locking to callers and degrade concurrency). + - Removing `PortMap` from returned objects (would be a behavior change for internal consumers and still leaves `PortResults`/`ICMPStatus` aliasing). + diff --git a/openspec/changes/fix-sweeper-summary-shallow-copy/specs/sweeper/spec.md b/openspec/changes/fix-sweeper-summary-shallow-copy/specs/sweeper/spec.md new file mode 100644 index 000000000..3f75b99d7 --- /dev/null +++ b/openspec/changes/fix-sweeper-summary-shallow-copy/specs/sweeper/spec.md @@ -0,0 +1,21 @@ +## ADDED Requirements + +### Requirement: Summary snapshot isolation +ServiceRadar MUST return sweep summaries whose `HostResult` entries do not alias internal mutable state. 
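To make the non-aliasing requirement concrete, the following is a minimal Go sketch of the deep-copy helper this change describes. The struct shapes (`PortResults`, `PortMap`, `ICMPStatus`) are simplified stand-ins for illustration, not the actual definitions in `pkg/models/sweep.go`.

```go
package models

// Simplified stand-ins for the real types; only aliasing-sensitive fields are modeled.
type PortResult struct {
	Port      int
	Available bool
}

type ICMPStatus struct {
	Available  bool
	PacketLoss float64
}

type HostResult struct {
	Host        string
	Available   bool
	PortResults []*PortResult
	PortMap     map[int]*PortResult
	ICMPStatus  *ICMPStatus
}

// DeepCopyHostResult returns a snapshot of src that shares no mutable state
// with the original, so callers may read it after shard locks are released.
// Assumes PortMap mirrors PortResults (see task 1.2 of this change).
func DeepCopyHostResult(src *HostResult) HostResult {
	dst := *src // scalar fields copied by value

	if src.ICMPStatus != nil {
		icmp := *src.ICMPStatus
		dst.ICMPStatus = &icmp
	}

	dst.PortResults = nil
	dst.PortMap = nil
	if len(src.PortResults) > 0 {
		dst.PortResults = make([]*PortResult, 0, len(src.PortResults))
		dst.PortMap = make(map[int]*PortResult, len(src.PortResults))
		for _, pr := range src.PortResults {
			if pr == nil {
				continue
			}
			cp := *pr
			dst.PortResults = append(dst.PortResults, &cp)
			// Map entries reuse the copied pointers from PortResults so each
			// port has exactly one copied object.
			dst.PortMap[cp.Port] = &cp
		}
	}

	return dst
}
```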
+ +#### Scenario: GetSummary returns safe-to-read host snapshots +- **GIVEN** a sweeper result processor has processed at least one result +- **WHEN** a caller invokes `GetSummary` +- **THEN** the returned `SweepSummary.Hosts` entries MUST be safe to read after the call returns (without holding internal shard locks) +- **AND** concurrent result processing MUST NOT cause data races when the caller reads `PortResults`, `PortMap`, or `ICMPStatus` + +#### Scenario: Streamed HostResult values remain safe after shard locks are released +- **GIVEN** a caller consumes host snapshots from a summary streaming API +- **WHEN** the streaming method has returned and internal locks have been released +- **THEN** the previously received `HostResult` values MUST remain safe to read while result processing continues concurrently + +#### Scenario: Caller mutation does not affect subsequent summaries +- **GIVEN** a caller has received a sweep summary containing a `HostResult` +- **WHEN** the caller mutates the returned host data (e.g., appends to `PortResults` or edits `PortMap`) +- **THEN** subsequent summaries MUST reflect only internally maintained state, not caller mutations + diff --git a/openspec/changes/fix-sweeper-summary-shallow-copy/tasks.md b/openspec/changes/fix-sweeper-summary-shallow-copy/tasks.md new file mode 100644 index 000000000..dd8edfd2a --- /dev/null +++ b/openspec/changes/fix-sweeper-summary-shallow-copy/tasks.md @@ -0,0 +1,17 @@ +## 1. Deep-copy helpers +- [x] 1.1 Add `models.DeepCopyHostResult(*models.HostResult) models.HostResult` that deep-copies `PortResults`, `PortMap`, and `ICMPStatus` +- [x] 1.2 Ensure copied `PortMap` entries reference the same copied `PortResult` pointers as `PortResults` (no duplicated per-port objects) +- [x] 1.3 Add unit tests verifying copies do not alias source fields (`PortResults`, `PortMap`, `ICMPStatus`) + +## 2. Apply deep-copy to `BaseProcessor` summaries +- [x] 2.1 Update `collectShardSummaries()` to append deep-copied `HostResult` values +- [x] 2.2 Update `processShardForSummary()` / `GetSummaryStream()` to send deep-copied `HostResult` values +- [x] 2.3 Add regression test reproducing issue #2148 and verifying `go test -race` passes + +## 3. Apply deep-copy to `InMemoryStore` conversions +- [x] 3.1 Update `convertToSlice()` to use deep-copied `HostResult` values +- [x] 3.2 Update `buildSummary()` to use deep-copied `HostResult` values + +## 4. Verification +- [x] 4.1 Run `go test -race ./pkg/sweeper -run TestGetSummary_ConcurrentReadsDoNotPanic` +- [x] 4.2 Run `go test ./pkg/...` diff --git a/openspec/changes/fix-sync-duplicate-key-constraint/proposal.md b/openspec/changes/fix-sync-duplicate-key-constraint/proposal.md deleted file mode 100644 index 30de42b2f..000000000 --- a/openspec/changes/fix-sync-duplicate-key-constraint/proposal.md +++ /dev/null @@ -1,102 +0,0 @@ -# Change: Fix duplicate key constraint violation during sync batch processing - -## Why - -Core in the demo namespace is emitting duplicate key constraint violations during device sync processing (issue #2067): - -``` -ERROR: duplicate key value violates unique constraint "idx_unified_devices_ip_unique_active" (SQLSTATE 23505) -``` - -Investigation reveals two root causes: - -### Root Cause 1: Intra-batch IP duplicates - -1. **`deduplicateBatch()` only handled weak-identity devices**: The original deduplication logic skipped devices with strong identities (Armis ID, Netbox ID, MAC, integration_id), assuming they represent distinct entities. - -2. 
**Multiple strong-identity devices can share the same IP**: During sync, different discovery sources can produce multiple `DeviceUpdate` records with the same IP but different device IDs and different strong identities (e.g., different Armis IDs). - -3. **Batch upsert only handles device_id conflicts**: The `cnpgInsertDeviceUpdates()` function uses `ON CONFLICT (device_id)` but the unique constraint is on IP. When two records in the same batch have the same IP but different device_ids, the constraint fires. - -### Root Cause 2: Conflicts with existing database records - -4. **Identity resolution prioritizes strong identities over IP**: The `lookupCanonicalFromMaps()` function resolves device identity in order: Device ID → Armis ID → Netbox ID → MAC → IP. When a device has a strong identity (e.g., Armis ID), it matches based on that identity, NOT based on IP. - -5. **New devices can conflict with existing database records**: A new device with a different Armis ID but the same IP as an existing device will NOT be merged during identity resolution. When it tries to insert, the IP uniqueness constraint fires against the existing record. - -### The constraint definition (migration 008) - -```sql -CREATE UNIQUE INDEX IF NOT EXISTS idx_unified_devices_ip_unique_active -ON unified_devices (ip) -WHERE device_id LIKE 'sr:%' - AND (metadata->>'_merged_into' IS NULL OR metadata->>'_merged_into' = '' OR metadata->>'_merged_into' = device_id) - AND COALESCE(lower(metadata->>'_deleted'),'false') <> 'true' - AND COALESCE(lower(metadata->>'deleted'),'false') <> 'true'; -``` - -This ensures only one active ServiceRadar device per IP, which is correct semantics. The problem was that batch processing didn't guarantee this invariant against both intra-batch duplicates AND existing database records. - -## What Changes - -### 1. Extend batch deduplication to enforce IP uniqueness (intra-batch) - -Modified `deduplicateBatch()` to deduplicate ALL devices by IP within a batch, not just weak-identity devices. When multiple devices share the same IP: - -- The **first device** (by order in batch) becomes the canonical device for that IP -- **Subsequent devices** with the same IP are converted to tombstones pointing to the first device -- Metadata from subsequent devices is merged into the first device to preserve information -- Service device IDs (`serviceradar:*`) are excluded as they use device_id identity -- Existing tombstones pass through unchanged - -### 2. Add database conflict resolution (cross-batch) - -Added new `resolveIPConflictsWithDB()` function that runs after intra-batch deduplication: - -- Queries the database for existing active devices with IPs in the batch using `resolveIPsToCanonical()` -- For any device whose IP already belongs to a different active device in the database: - - Converts the new device to a tombstone pointing to the existing device - - Creates a merge update to add new metadata to the existing device -- This prevents constraint violations when new devices conflict with existing records - -### 3. Add IP collision logging and metrics - -Added observability for IP collisions: -- Debug-level logging when IP collisions are detected (both intra-batch and database) -- Info-level summary logging with collision counts -- Metric `device_batch_ip_collisions_total` to track cumulative collision frequency - -### 4. 
Preserve identity markers during merge - -When merging devices by IP: -- Metadata from the tombstoned device is copied to preserve identity markers (armis_device_id, netbox_device_id, etc.) -- MAC addresses are merged if present -- This preserves the ability to look up the device by any of its identities - -## Impact - -- **Affected specs**: device-identity-reconciliation -- **Affected code**: - - `pkg/registry/registry.go`: - - `deduplicateBatch()` - extended to handle all devices by IP - - `resolveIPConflictsWithDB()` - new function for database conflict resolution - - `ProcessBatchDeviceUpdates()` - added call to new function - - `pkg/registry/identity_metrics.go` - new `device_batch_ip_collisions_total` metric - - `pkg/registry/registry_dedupe_test.go` - updated and added tests -- **Risk**: Low - changes are defensive and gracefully handle edge cases -- **Performance**: One additional database query per batch (to resolve IPs to canonical device IDs) - -## Trade-offs - -- **First-wins semantics**: When two devices have the same IP, the first one (either in batch or in database) survives. This is deterministic and consistent with the constraint semantics. - -- **Database query per batch**: The `resolveIPConflictsWithDB()` function adds one database query per batch. This is acceptable given batch sizes (~16K devices) and the query uses existing `resolveIPsToCanonical()` infrastructure which is optimized for bulk IP lookups. - -- **Potential data loss**: If two genuinely distinct devices share an IP (e.g., behind NAT), this approach will merge them. However, the unique constraint already enforces this invariant, so this matches the intended data model. - -## Validation Results (2025-12-05) - -Deployed to demo namespace and observed: -- `"db_ip_conflicts":1286` - Fix resolved 1286 IP conflicts with existing database records -- Zero duplicate key errors after fix deployment -- Sync processing completes successfully with large batches (16K+ devices) diff --git a/openspec/changes/fix-sync-duplicate-key-constraint/specs/device-identity-reconciliation/spec.md b/openspec/changes/fix-sync-duplicate-key-constraint/specs/device-identity-reconciliation/spec.md deleted file mode 100644 index 60f72b739..000000000 --- a/openspec/changes/fix-sync-duplicate-key-constraint/specs/device-identity-reconciliation/spec.md +++ /dev/null @@ -1,46 +0,0 @@ -## ADDED Requirements - -### Requirement: Batch-level IP Uniqueness Enforcement - -The system SHALL enforce IP uniqueness within a single batch of device updates before attempting database insertion, preventing duplicate key constraint violations. 
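To illustrate the first-wins behavior this requirement implies, here is a minimal Go sketch of intra-batch IP deduplication. The `DeviceUpdate` fields and helper name are simplified stand-ins, not the actual `pkg/registry` implementation; only the `_merged_into` metadata key and the skip rules come from this proposal.

```go
package registry

import "strings"

// DeviceUpdate is a pared-down stand-in for the real models.DeviceUpdate;
// field names here are illustrative only.
type DeviceUpdate struct {
	DeviceID string
	IP       string
	Metadata map[string]string
}

// deduplicateByIP sketches the first-wins behavior: the first device seen for
// an IP survives, later devices with the same IP become tombstones pointing
// at it, and their identity markers are merged into the survivor. Service
// device IDs and existing tombstones pass through untouched. Tombstones are
// appended after canonical devices so references resolve in order.
func deduplicateByIP(updates []*DeviceUpdate) []*DeviceUpdate {
	canonicalByIP := make(map[string]*DeviceUpdate)
	out := make([]*DeviceUpdate, 0, len(updates))
	var tombstones []*DeviceUpdate

	for _, u := range updates {
		if u.IP == "" ||
			strings.HasPrefix(u.DeviceID, "serviceradar:") || // service devices keep device_id identity
			u.Metadata["_merged_into"] != "" { // existing tombstones pass through
			out = append(out, u)
			continue
		}

		first, seen := canonicalByIP[u.IP]
		if !seen {
			canonicalByIP[u.IP] = u
			out = append(out, u)
			continue
		}

		// First wins: copy missing identity markers into the survivor ...
		if first.Metadata == nil {
			first.Metadata = make(map[string]string)
		}
		for k, v := range u.Metadata {
			if _, exists := first.Metadata[k]; !exists {
				first.Metadata[k] = v
			}
		}
		// ... then convert the duplicate into a tombstone.
		if u.Metadata == nil {
			u.Metadata = make(map[string]string)
		}
		u.Metadata["_merged_into"] = first.DeviceID
		tombstones = append(tombstones, u)
	}

	return append(out, tombstones...)
}
```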
- -#### Scenario: Two strong-identity devices with same IP in batch -- **WHEN** a batch contains device A (sr:uuid-a, IP=192.168.1.100, armis_id=12345) and device B (sr:uuid-b, IP=192.168.1.100, netbox_id=67890) -- **THEN** the system converts device B to a tombstone pointing to device A -- **AND** merges device B's metadata (including netbox_id) into device A -- **AND** the batch insert succeeds without constraint violation - -#### Scenario: Weak-identity device follows strong-identity device with same IP -- **WHEN** a batch contains device A (sr:uuid-a, IP=192.168.1.100, armis_id=12345) followed by device B (sr:uuid-b, IP=192.168.1.100, no strong identity) -- **THEN** the system converts device B to a tombstone pointing to device A -- **AND** the batch insert succeeds - -#### Scenario: Tombstone ordering within batch -- **WHEN** a batch contains both canonical devices and tombstones -- **THEN** tombstones are ordered after their canonical targets in the batch -- **AND** this ensures canonical devices exist before references are created - -### Requirement: Identity Marker Preservation During Merge - -When merging devices due to IP collision, the system SHALL preserve identity markers from the tombstoned device to maintain lookup capability. - -#### Scenario: Armis ID preserved during merge -- **WHEN** device B with armis_device_id=12345 is tombstoned into device A -- **THEN** device A's metadata contains armis_device_id=12345 -- **AND** the device can still be looked up by that Armis ID - -#### Scenario: Multiple identity markers preserved -- **WHEN** device B with netbox_device_id=100 and integration_id=ABC is tombstoned into device A -- **THEN** device A's metadata contains both netbox_device_id=100 and integration_id=ABC - -### Requirement: IP Collision Observability - -The system SHALL provide visibility into IP collision events within batches. - -#### Scenario: IP collision metric recorded -- **WHEN** an IP collision is detected and resolved within a batch -- **THEN** the `device_batch_ip_collisions_total` metric is incremented - -#### Scenario: IP collision logged -- **WHEN** an IP collision is detected -- **THEN** a Debug-level log entry records the IP address and both device IDs involved diff --git a/openspec/changes/fix-sync-duplicate-key-constraint/tasks.md b/openspec/changes/fix-sync-duplicate-key-constraint/tasks.md deleted file mode 100644 index 0b1808c7e..000000000 --- a/openspec/changes/fix-sync-duplicate-key-constraint/tasks.md +++ /dev/null @@ -1,53 +0,0 @@ -## 1. Extend batch deduplication to enforce IP uniqueness (intra-batch) - -- [x] 1.1 Modify `deduplicateBatch()` to track all devices by IP, not just weak-identity devices. -- [x] 1.2 When an IP collision is detected (second device with same IP), convert the second device to a tombstone pointing to the first. -- [x] 1.3 Merge metadata from the tombstoned device into the surviving device using `mergeUpdateMetadata()`. -- [x] 1.4 Preserve identity markers (armis_device_id, netbox_device_id, mac, integration_id) during merge. -- [x] 1.5 Skip service device IDs (`serviceradar:*`) as they use device_id identity. -- [x] 1.6 Skip existing tombstones (devices with `_merged_into` already set). - -## 2. Add database conflict resolution (cross-batch) - -- [x] 2.1 Create `resolveIPConflictsWithDB()` function to check batch against existing database records. -- [x] 2.2 Query database for existing active devices with IPs in the batch using `resolveIPsToCanonical()`. 
-- [x] 2.3 For conflicting devices, convert new device to tombstone pointing to existing device. -- [x] 2.4 Create merge update for existing device to incorporate new metadata. -- [x] 2.5 Call `resolveIPConflictsWithDB()` from `ProcessBatchDeviceUpdates()` after intra-batch deduplication. - -## 3. Add observability for IP collisions - -- [x] 3.1 Add `device_batch_ip_collisions_total` counter metric to identity metrics. -- [x] 3.2 Log IP collision events at Debug level with device IDs involved. -- [x] 3.3 Log summary at Info level with collision counts (`ip_collisions`, `db_ip_conflicts`). -- [x] 3.4 Record metrics in both `deduplicateBatch()` and `resolveIPConflictsWithDB()`. - -## 4. Handle tombstone ordering within batch - -- [x] 4.1 Append tombstones after canonical devices in the batch result. -- [x] 4.2 This ensures canonical devices are processed before tombstone references. - -## 5. Testing - -- [x] 5.1 Update existing `TestDeduplicateBatchMergesStrongIdentityByIP` to verify tombstone creation. -- [x] 5.2 Update existing `TestDeduplicateBatchMergesWeakSightings` to verify new behavior. -- [x] 5.3 Add `TestDeduplicateBatchSkipsServiceDeviceIDs` - service devices not deduplicated by IP. -- [x] 5.4 Add `TestDeduplicateBatchSkipsExistingTombstones` - existing tombstones pass through. -- [x] 5.5 Add `TestDeduplicateBatchSkipsEmptyIP` - devices without IPs not deduplicated. -- [x] 5.6 Add `TestDeduplicateBatchMultipleCollisions` - multiple IP collisions in single batch. -- [x] 5.7 Verify all registry tests pass (`go test ./pkg/registry/...`). -- [x] 5.8 Verify linter passes (`golangci-lint run ./pkg/registry/...`). - -## 6. Deployment and validation - -- [x] 6.1 Build updated code (`make build`). -- [x] 6.2 Push container images (`make push_all`). -- [x] 6.3 Deploy to demo namespace via helm upgrade. -- [x] 6.4 Verify duplicate key errors are eliminated in logs. -- [x] 6.5 Verify `db_ip_conflicts` logging shows conflicts being resolved. - -**Validation Results (2025-12-05):** -- Deployed to demo namespace (helm revision 251) -- Observed: `"db_ip_conflicts":1286` - Fix resolved 1286 IP conflicts with existing database records -- Verified: Zero duplicate key errors in logs after fix deployment -- Sync processing completes successfully with large batches (16K+ devices) diff --git a/openspec/changes/fix-sysmon-vm-macos-service-startup/proposal.md b/openspec/changes/fix-sysmon-vm-macos-service-startup/proposal.md deleted file mode 100644 index 997edea87..000000000 --- a/openspec/changes/fix-sysmon-vm-macos-service-startup/proposal.md +++ /dev/null @@ -1,125 +0,0 @@ -# Change: Fix sysmon-vm macOS service startup after pkg installation - -## Status: IMPLEMENTED (2025-12-02) - -## Why -The sysmon-vm macOS `.pkg` installer drops the binary, config, and LaunchDaemon plist into their correct locations, but the service never starts automatically after installation. Unlike Linux packages that auto-start via systemd triggers, macOS `.pkg` installers require explicit postinstall scripts to bootstrap LaunchDaemons with `launchctl`. The current packaging workflow was missing this critical step. - -Additionally, after mTLS onboarding (via `--mtls-bootstrap-only`), the service needed to be restarted to pick up the new configuration, but this was not automated. - -## Root Cause -1. **Missing postinstall script**: The `package-host-macos.sh` script created the `.pkg` with `pkgbuild --root ...` but did not include a `--scripts` directory with postinstall hooks. -2. 
**No launchctl bootstrap**: Without a postinstall script, the plist at `/Library/LaunchDaemons/com.serviceradar.sysmonvm.plist` was installed but never loaded into launchd. -3. **Missing log directory**: The plist references `/var/log/serviceradar/` for stdout/stderr, but this directory may not exist, causing launchd to fail silently if it cannot open log files. -4. **No service restart after mTLS onboarding**: When users ran the mTLS bootstrap command with `--mtls-bootstrap-only`, the config was written but the running service was not restarted to apply the new configuration. - -## What Changed - -### 1. Package Installer Scripts -Created `packaging/sysmonvm_host/scripts/postinstall`: -- Creates `/var/log/serviceradar/` directory if missing -- Validates binary, config, and plist exist before proceeding -- Stops any existing service gracefully with `launchctl bootout` -- Loads new plist with `launchctl bootstrap system` -- Enables service for auto-start with `launchctl enable` -- Starts service immediately with `launchctl kickstart -k` - -Created `packaging/sysmonvm_host/scripts/preinstall`: -- Gracefully stops existing service before upgrade (if running) -- Ensures clean upgrade path - -### 2. Packaging Script Update -Modified `scripts/sysmonvm/package-host-macos.sh`: -- Added `PKG_SCRIPTS_DIR="${REPO_ROOT}/packaging/sysmonvm_host/scripts"` variable -- Added validation loop to ensure preinstall/postinstall scripts exist and are executable -- Updated `pkgbuild` invocation to include `--scripts "${PKG_SCRIPTS_DIR}"` -- Updated success message to indicate scripts are included - -### 3. Bazel Build Update -Modified `packaging/sysmonvm_host/BUILD.bazel`: -- Added `pkg_scripts` filegroup to track scripts directory -- Updated `sysmonvm_host_pkg` genrule srcs to depend on `:pkg_scripts` - -### 4. Auto-Restart After mTLS Onboarding -Modified `cmd/checkers/sysmon-vm/main.go`: -- Added imports: `os/exec`, `runtime` -- Added constant: `launchdServiceTarget = "system/com.serviceradar.sysmonvm"` -- Added `restartLaunchdService()` function: - - Only runs on darwin (macOS) - - Checks for root privileges (euid == 0) - - Calls `launchctl kickstart -k system/com.serviceradar.sysmonvm` - - Logs helpful message if restart fails (e.g., not running as root) -- Called automatically after `--mtls-bootstrap-only` writes the config - -## Files Changed -| File | Change Type | -|------|-------------| -| `packaging/sysmonvm_host/scripts/postinstall` | New | -| `packaging/sysmonvm_host/scripts/preinstall` | New | -| `packaging/sysmonvm_host/BUILD.bazel` | Modified | -| `scripts/sysmonvm/package-host-macos.sh` | Modified | -| `cmd/checkers/sysmon-vm/main.go` | Modified | - -## Build & Test - -### Build the package -```bash -bazel build --config=darwin_pkg //packaging/sysmonvm_host:sysmonvm_host_pkg -``` - -Build output confirms scripts are included: -``` -pkgbuild: Adding top-level preinstall script -pkgbuild: Adding top-level postinstall script -pkgbuild: Wrote package to .../serviceradar-sysmonvm-host-macos.pkg -Wrote installer package (with pre/postinstall scripts) to ... 
-``` - -### Fresh Install -```bash -# Install the package -sudo installer -pkg /tmp/serviceradar-sysmonvm-host-macos.pkg -target / - -# Service starts automatically - verify with: -sudo launchctl list | grep sysmonvm -ps aux | grep serviceradar-sysmon-vm -``` - -### mTLS Onboarding -```bash -# Run onboarding (service restarts automatically) -sudo /usr/local/libexec/serviceradar/serviceradar-sysmon-vm \ - --mtls --mtls-bootstrap-only \ - --token "edgepkg-v1:..." \ - --host http://192.168.2.134:8090 - -# Output now includes: -# 2025/12/02 21:25:08 mTLS bundle installed to /etc/serviceradar/certs -# 2025/12/02 21:25:08 persisted mTLS config to /usr/local/etc/serviceradar/sysmon-vm.json -# 2025/12/02 21:25:08 restarting launchd service system/com.serviceradar.sysmonvm to apply new configuration... -# 2025/12/02 21:25:08 service restart initiated successfully -# 2025/12/02 21:25:08 mTLS bootstrap-only mode enabled; exiting after writing config -``` - -### Manual Service Control -```bash -# Stop service -sudo launchctl stop system/com.serviceradar.sysmonvm - -# Start service -sudo launchctl start system/com.serviceradar.sysmonvm - -# Restart service -sudo launchctl kickstart -k system/com.serviceradar.sysmonvm - -# Check status -sudo launchctl list | grep sysmonvm - -# View logs -tail -f /var/log/serviceradar/sysmon-vm.log -tail -f /var/log/serviceradar/sysmon-vm.err.log -``` - -## Impact -- Affected specs: edge-onboarding, sysmon-telemetry -- Affected components: sysmon-vm macOS packaging, sysmon-vm binary diff --git a/openspec/changes/fix-sysmon-vm-metrics-availability/proposal.md b/openspec/changes/fix-sysmon-vm-metrics-availability/proposal.md deleted file mode 100644 index bf322ae0e..000000000 --- a/openspec/changes/fix-sysmon-vm-metrics-availability/proposal.md +++ /dev/null @@ -1,129 +0,0 @@ -# Change: Restore sysmon-vm metrics availability - -## Status: IMPLEMENTED (2025-12-02) - -## Why -Sysmon-vm collectors running on edge hosts (e.g., darwin/arm64) are healthy and connected, but their metrics no longer appear in the UI or `/api/sysmon` (GH-2042). The metrics pipeline should deliver device-level data whenever the collector is online; the current drop silently hides sysmon health. - -## Root Cause -The Apache AGE graph extension adds `ag_catalog` to the PostgreSQL `search_path`. This schema contained duplicate metric table definitions (e.g., `ag_catalog.cpu_metrics`) that shadowed the intended `public.cpu_metrics` tables. As a result: - -1. **INSERT statements** in `pkg/db/cnpg_metrics.go` used unqualified table names like `INSERT INTO cpu_metrics`, which resolved to `ag_catalog.cpu_metrics` instead of `public.cpu_metrics`. -2. **SELECT queries** in `pkg/core/api/sysmon.go` also used unqualified table names, reading from the wrong (empty) tables. -3. Data was successfully inserted but into the wrong schema, while queries returned empty results from the intended schema. - -Additionally, the `sendCNPG` batch function was not properly reading batch results before closing, which could silently discard insert errors. - -## What Changes - -### 1. Explicit Schema Qualification for Writes -Modified `pkg/db/cnpg_metrics.go` to use explicit `public.` schema prefix for all INSERT statements: -- `INSERT INTO public.timeseries_metrics` -- `INSERT INTO public.cpu_metrics` -- `INSERT INTO public.cpu_cluster_metrics` -- `INSERT INTO public.disk_metrics` -- `INSERT INTO public.memory_metrics` -- `INSERT INTO public.process_metrics` - -### 2. 
Explicit Schema Qualification for Reads -Modified `pkg/core/api/sysmon.go` to use explicit `public.` schema prefix for all device-centric SELECT queries: -- `SELECT ... FROM public.cpu_metrics` -- `SELECT ... FROM public.cpu_cluster_metrics` -- `SELECT ... FROM public.memory_metrics` -- `SELECT ... FROM public.disk_metrics` -- `SELECT ... FROM public.process_metrics` - -### 3. Proper Batch Result Handling -Modified `sendCNPG()` in `pkg/db/cnpg_metrics.go` to properly read batch results before closing: -```go -func (db *DB) sendCNPG(ctx context.Context, batch *pgx.Batch, name string) (err error) { - br := db.pgPool.SendBatch(ctx, batch) - defer func() { - if closeErr := br.Close(); closeErr != nil && err == nil { - err = fmt.Errorf("cnpg %s batch close: %w", name, closeErr) - } - }() - - // Read results for each queued command to properly detect errors - for i := 0; i < batch.Len(); i++ { - if _, err = br.Exec(); err != nil { - return fmt.Errorf("cnpg %s insert (command %d): %w", name, i, err) - } - } - - return nil -} -``` - -### 4. Linter Fixes -- Fixed `br.Close()` error return value not being checked (errcheck) -- Fixed useless assertions in `pkg/checker/sysmonvm/service_test.go` (testifylint) - -### 5. Implement GetResults in sysmon-vm -The poller's KV config overlay sets `results_interval` for sysmon-vm, causing the poller to call `GetResults` instead of `GetStatus`. However, sysmon-vm only implemented `GetStatus`, resulting in empty metrics being returned. - -Modified `pkg/checker/sysmonvm/service.go` to implement `GetResults`: -```go -// GetResults implements the monitoring.AgentService GetResults RPC. -// It collects the same sysmon metrics as GetStatus but returns a ResultsResponse. -func (s *Service) GetResults(ctx context.Context, req *proto.ResultsRequest) (*proto.ResultsResponse, error) { - // ... collects CPU, memory, cluster metrics ... - return &proto.ResultsResponse{ - Available: true, - Data: dataBytes, - ServiceName: req.GetServiceName(), - ServiceType: req.GetServiceType(), - ResponseTime: respTime, - AgentId: req.GetAgentId(), - PollerId: req.GetPollerId(), - Timestamp: now.UnixNano(), - CurrentSequence: currentSeq, // monotonically increasing sequence - HasNewData: true, // sysmon always has fresh metrics - }, nil -} -``` - -Added: -- `sequence atomic.Uint64` field to Service for tracking response sequences -- `failureResultsResponse()` helper for error cases -- Proper imports: `strconv`, `sync/atomic` - -## Files Changed -| File | Change Type | -|------|-------------| -| `pkg/db/cnpg_metrics.go` | Modified - Added `public.` prefix to all INSERT statements, fixed batch close error handling | -| `pkg/core/api/sysmon.go` | Modified - Added `public.` prefix to all SELECT queries | -| `pkg/checker/sysmonvm/service.go` | Modified - Implemented `GetResults` RPC method, added sequence tracking | -| `pkg/checker/sysmonvm/service_test.go` | Modified - Fixed useless assertions | - -## Verification - -### Database Check -```bash -# Before fix - data going to wrong schema -docker exec serviceradar-cnpg-mtls psql -U serviceradar -d serviceradar -c \ - "SELECT COUNT(*), MAX(timestamp) FROM public.cpu_metrics;" -# count: 5450, max: 2025-12-03 01:00:32 (stale) - -docker exec serviceradar-cnpg-mtls psql -U serviceradar -d serviceradar -c \ - "SELECT COUNT(*), MAX(timestamp) FROM ag_catalog.cpu_metrics;" -# count: 31872, max: 2025-12-03 01:30:00 (fresh - wrong table!) 
- -# After fix - data going to correct schema -docker exec serviceradar-cnpg-mtls psql -U serviceradar -d serviceradar -c \ - "SELECT COUNT(*), MAX(timestamp) FROM public.cpu_metrics;" -# count: 6090+, max: current timestamp (fresh - correct table!) -``` - -### API Verification -```bash -# CPU metrics now returned correctly via device-centric API -curl -H "X-API-Key: $API_KEY" \ - "http://localhost:8090/api/devices/sr:88239dc2-7208-4c24-a396-3f868c2c9419/sysmon/cpu" -# Returns array of CPU metrics with frequency_hz and usage_percent -``` - -## Impact -- Affected specs: sysmon-telemetry -- Affected code: pkg/db/cnpg_metrics.go, pkg/core/api/sysmon.go -- All sysmon metrics (CPU, memory, disk, process) now correctly persist to and query from `public` schema tables diff --git a/openspec/changes/fix-sysmon-vm-metrics-availability/specs/sysmon-telemetry/spec.md b/openspec/changes/fix-sysmon-vm-metrics-availability/specs/sysmon-telemetry/spec.md deleted file mode 100644 index c6a96d23a..000000000 --- a/openspec/changes/fix-sysmon-vm-metrics-availability/specs/sysmon-telemetry/spec.md +++ /dev/null @@ -1,22 +0,0 @@ -## ADDED Requirements -### Requirement: Sysmon metrics availability for enrolled devices -The system SHALL deliver sysmon CPU/memory/time-series metrics from enrolled sysmon-vm collectors to the device's telemetry endpoints within one polling interval when the collector is healthy. - -#### Scenario: Connected sysmon-vm metrics are queryable -- **WHEN** a sysmon-vm collector is enrolled via mTLS, connected to poller/core, and producing host metrics for its target device -- **THEN** the system SHALL persist those metrics to CNPG and make them available via `/api/sysmon` (and UI charts) for that device within one polling interval. - -#### Scenario: Metrics stay attributed to the target device -- **WHEN** the sysmon-vm restarts or reconnects with the same target device identity -- **THEN** sysmon metrics remain keyed to the target device (not the collector host) and continue to display without manual reassociation. - -### Requirement: Sysmon pipeline degradation visibility -The system SHALL detect and surface when sysmon metrics stop arriving even though the sysmon-vm collector remains registered/connected. - -#### Scenario: Alert on stalled sysmon metrics stream -- **WHEN** a sysmon-vm collector stays connected but no sysmon metric batches are stored for more than five polling intervals -- **THEN** the system SHALL emit an actionable signal (e.g., event/log/health marker) tied to the collector/device and mark sysmon metrics as unavailable instead of serving empty graphs. - -#### Scenario: Query or write errors surface diagnostics -- **WHEN** sysmon metrics cannot be written to or read from CNPG due to validation/schema/query errors -- **THEN** the system SHALL record the error context and expose it for troubleshooting, avoiding silent empty responses. diff --git a/openspec/changes/fix-sysmon-vm-metrics-availability/tasks.md b/openspec/changes/fix-sysmon-vm-metrics-availability/tasks.md deleted file mode 100644 index e3dc15da0..000000000 --- a/openspec/changes/fix-sysmon-vm-metrics-availability/tasks.md +++ /dev/null @@ -1,15 +0,0 @@ -## 1. Investigation -- [x] 1.1 Confirm current sysmon-vm enrollment/health (darwin/Compose mTLS path), capture sysmon-vm/poller/core logs, and note device IDs/endpoints in use. 
_Finding: sysmon-vm was connected and delivering CPU metrics for `sr:88239dc2-7208-4c24-a396-3f868c2c9419`, but memory/disk/process metrics were never emitted by the checker, so CNPG tables were empty and the UI/Next route returned null/500 for memory._ -- [x] 1.2 Trace sysmon metrics flow (collector → poller → core → CNPG → `/api/sysmon`/UI) to isolate where data drops (e.g., target mapping, ingestion failure, query filtering). _Finding: Apache AGE extension's `ag_catalog` schema was shadowing `public` schema. INSERT statements went to `ag_catalog.cpu_metrics` while SELECT queries read from empty `public.cpu_metrics`._ - -## 2. Fix & instrumentation -- [x] 2.1 Implement the pipeline fix so connected sysmon-vm collectors persist CPU/memory/time-series metrics for their target device again (include any needed target identity mapping guards). _Fix: Added explicit `public.` schema prefix to all INSERT statements in `pkg/db/cnpg_metrics.go` and SELECT queries in `pkg/core/api/sysmon.go`._ -- [x] 2.2 Add detection/logging/metrics when sysmon collectors remain connected but metrics stop arriving or cannot be written/queryable; surface actionable signals (events/alerts/health markers) instead of empty panels. _Fix: Fixed `sendCNPG()` to properly read batch results before closing, ensuring insert errors are detected and logged._ - -## 3. Validation -- [x] 3.1 Add regression coverage for sysmon-vm → poller/core → CNPG write → `/api/sysmon` query (unit/integration as appropriate). _Fixed linter errors in `sysmonvm/service_test.go`._ -- [x] 3.2 Manual E2E: run darwin/arm64 sysmon-vm against Compose poller with mTLS, verify metrics appear in CNPG and UI `/api/sysmon` panels within one polling interval. _Verified: `public.cpu_metrics` shows fresh data with count 6090+ and timestamps updating every 30s._ - -## 4. Outstanding Issues (RESOLVED) -- [x] 4.1 Investigate why sysmon-vm returns `method GetResults not implemented` when poller calls GetResults. _Finding: The poller's KV config overlay sets `results_interval` for sysmon-vm, causing it to call `GetResults` instead of `GetStatus`. sysmon-vm only implemented `GetStatus`. Fix: Implemented `GetResults` method in `pkg/checker/sysmonvm/service.go` that collects the same metrics and returns a `ResultsResponse` with proper sequence tracking._ -- [x] 4.2 Verify memory metrics collection - sysmon-vm now collects memory via gopsutil but need to confirm it flows through to database and UI. _Verified: Core logs show `memory_count:1, has_memory:true` and metrics are being flushed to database. Database count increased from 6090 to 6098+ with fresh timestamps._ diff --git a/openspec/changes/integrate-sysmon-edge-onboarding/design.md b/openspec/changes/integrate-sysmon-edge-onboarding/design.md deleted file mode 100644 index c76dca4c3..000000000 --- a/openspec/changes/integrate-sysmon-edge-onboarding/design.md +++ /dev/null @@ -1,126 +0,0 @@ -## Context -- The Rust sysmon checker (`cmd/checkers/sysmon/`) provides system metrics (CPU, memory, disk, ZFS) via gRPC but requires manual configuration. -- The Go sysmon-osx checker (`cmd/checkers/sysmon-osx/`) demonstrates edge onboarding integration via `edgeonboarding.TryOnboard()`, supporting both mTLS and SPIRE paths. 
-- The existing Go edgeonboarding package (`pkg/edgeonboarding/`) handles: - - Token parsing: `edgepkg-v1:<base64url-encoded-json>` containing `{pkg, dl, api}` - - Package download: `POST /api/admin/edge-packages/{id}/download?format=json` - - Credential installation: CA certs, client certs, SPIRE join tokens - - Config generation: Service-specific JSON configs with security settings -- The sysmon checker already has `SecurityConfig` with `mode: mtls|spiffe|none`, so the config structure is ready. - -## Goals / Non-Goals -- Goals: - - Port edge onboarding functionality to Rust for use by sysmon and future Rust checkers. - - Support both mTLS-only and SPIRE-based onboarding, matching sysmon-osx capabilities. - - Keep the Rust implementation minimal and focused on checker needs (not pollers/agents). - - Maintain backwards compatibility with existing manual config workflows. -- Non-Goals: - - Replace the Go edgeonboarding package (keep it for Go services). - - Implement the full Admin API for package management (consumers only). - - Add SPIRE server/agent functionality (only workload API client). - -## Decisions (initial) -- Create a new Rust crate `rust/edge-onboarding` with minimal dependencies (`reqwest`, `serde`, `base64`). -- Mirror the Go API where practical: `try_onboard()` as the main entry point returning `Option<OnboardingResult>`. -- Store downloaded credentials in `/var/lib/serviceradar/sysmon/` (or configurable path). -- Generate a merged config file that the existing `Config::from_file()` can load without changes. -- Support environment-based activation: if `ONBOARDING_TOKEN` is set, attempt onboarding before loading the config file. -- Add CLI flags `--mtls`, `--token <TOKEN>`, `--host <HOST>` for explicit mTLS bootstrap (matching sysmon-osx). - -## Architecture - -``` - ┌─────────────────────────┐ - │ Admin UI / CLI │ - │ (generates token) │ - └───────────┬─────────────┘ - │ token - ▼ -┌─────────────────────────────────────────────────────────────────┐ -│ sysmon (Rust) │ -│ ┌──────────────────┐ ┌──────────────────────────────────┐ │ -│ │ CLI Parser │──▶│ edge-onboarding crate │ │ -│ │ --mtls/env vars │ │ - parse_token() │ │ -│ └──────────────────┘ │ - download_package() │ │ -│ │ - install_credentials() │ │ -│ │ - generate_config() │ │ -│ └──────────────┬───────────────────┘ │ -│ │ generated config │ -│ ▼ │ -│ ┌──────────────────────────────────┐ │ -│ │ Config loader │ │ -│ │ (existing config.rs) │ │ -│ └──────────────┬───────────────────┘ │ -│ │ │ -│ ▼ │ -│ ┌──────────────────────────────────┐ │ -│ │ SysmonService (gRPC server) │ │ -│ │ with mTLS/SPIFFE credentials │ │ -│ └──────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────┘ -``` - -## Token and Package Format - -### Token Format (same as Go) -``` -edgepkg-v1:<base64url> - -Decoded JSON: -{ - "pkg": "package_id", - "dl": "download_token", - "api": "https://core.example.com" (optional) -} -``` - -### Package Response (from Core API) -```json -{ - "package": { - "package_id": "...", - "component_type": "checker", - "checker_kind": "sysmon", - "checker_config_json": "{...}", - ... 
- }, - "join_token": "...", - "bundle_pem": "...", - "mtls_bundle": { - "ca_cert_pem": "...", - "client_cert": "...", - "client_key": "...", - "server_name": "...", - "endpoints": {"poller": "...", "core": "..."} - } -} -``` - -## Generated Config Structure -The edge-onboarding crate generates a config compatible with sysmon's existing `Config` struct: - -```json -{ - "listen_addr": "0.0.0.0:50083", - "security": { - "mode": "mtls", - "cert_dir": "/var/lib/serviceradar/sysmon/certs", - "cert_file": "client.crt", - "key_file": "client.key", - "ca_file": "ca.crt" - }, - "poll_interval": 30, - "filesystems": [{"name": "/", "type": "ext4", "monitor": true}], - "partition": "sysmon-edge-001" -} -``` - -## Open Questions -- Should the Rust crate be general-purpose for all Rust checkers, or sysmon-specific initially? -- Do we need async support in the edge-onboarding crate, or can blocking HTTP be acceptable during bootstrap? -- Should we persist the onboarding result (package ID, SPIFFE ID) separately from the generated config for status reporting? - -## References -- Go edgeonboarding package: `pkg/edgeonboarding/` -- Go sysmon-osx main: `cmd/checkers/sysmon-osx/main.go` -- mTLS edge onboarding change: `openspec/changes/add-mtls-only-edge-onboarding/` diff --git a/openspec/changes/integrate-sysmon-edge-onboarding/proposal.md b/openspec/changes/integrate-sysmon-edge-onboarding/proposal.md deleted file mode 100644 index df2dcf821..000000000 --- a/openspec/changes/integrate-sysmon-edge-onboarding/proposal.md +++ /dev/null @@ -1,45 +0,0 @@ -# Change: Integrate edge onboarding into sysmon (Rust) package - -## Why -- The Rust-based `sysmon` checker lacks edge onboarding support, requiring manual configuration for SPIFFE/mTLS deployments. -- `sysmon-osx` (Go) already supports edge onboarding via the `pkg/edgeonboarding` package, demonstrating the pattern and proving customer value. -- Customers deploying `sysmon` on Linux edge nodes need the same zero-touch token-based install experience available to `sysmon-osx` users. -- The Rust sysmon checker is the primary system monitoring solution for Linux deployments and should have feature parity with sysmon-osx. - -## What Changes -- Create a Rust edge onboarding library (`rust/edge-onboarding`) that ports the core functionality from `pkg/edgeonboarding` (Go), including: - - Token parsing and validation (`edgepkg-v1:` format) - - Package download from Core API - - mTLS bundle installation - - SPIFFE/SPIRE credential configuration - - Deployment type detection (Docker, Kubernetes, bare-metal) -- Integrate the edge onboarding library into `cmd/checkers/sysmon/src/main.rs`: - - Add `--mtls` flag for mTLS-only bootstrap (parallel to sysmon-osx) - - Add environment variable support: `ONBOARDING_TOKEN`, `KV_ENDPOINT`, `CORE_API_URL` - - Generate and persist configuration from onboarding package -- Support both onboarding paths: - - **mTLS path**: Token + host downloads CA + client cert/key bundle - - **SPIRE path**: Token + KV endpoint configures SPIRE workload API credentials -- Update Docker/Compose and Kubernetes manifests to support edge-onboarded sysmon checkers. - -## Status (2025-12-04) -- Created `rust/edge-onboarding` crate with token parsing, package download, mTLS bundle installation, deployment detection, and config generation. -- Integrated edge-onboarding into sysmon checker with CLI flags (`--mtls`, `--token`, `--host`, `--bundle`, `--cert-dir`) and environment variable support. -- All 14 unit tests passing. -- Documentation updated in README. 
-- Dockerfile updated with edge onboarding environment variables and directories. -- **E2E Validation Complete:** - - mTLS bootstrap tested with Docker Compose stack - certificates installed with proper permissions. - - Restart resilience verified - checker uses persisted config/certs on restart. - - Error handling verified - clear messages for invalid/expired tokens. -- **Deferred:** - - SPIRE-based onboarding (requires SPIRE infrastructure). - - ARM64 cross-platform testing. - -## Impact -- Affected specs: edge-onboarding. -- Affected code: - - New: `rust/edge-onboarding/` crate - - Modified: `cmd/checkers/sysmon/src/main.rs`, `cmd/checkers/sysmon/Cargo.toml` - - Modified: Docker Compose checker configs, Helm chart checker templates (if applicable) -- Dependencies: `reqwest` (HTTP client), `serde_json` (JSON parsing), `base64` (token decoding) diff --git a/openspec/changes/integrate-sysmon-edge-onboarding/specs/edge-onboarding/spec.md b/openspec/changes/integrate-sysmon-edge-onboarding/specs/edge-onboarding/spec.md deleted file mode 100644 index b0d35d3c8..000000000 --- a/openspec/changes/integrate-sysmon-edge-onboarding/specs/edge-onboarding/spec.md +++ /dev/null @@ -1,65 +0,0 @@ -## ADDED Requirements - -### Requirement: Rust sysmon checker supports edge onboarding -The Rust-based sysmon checker SHALL support edge onboarding via token-based bootstrap, matching the functionality available in sysmon-osx, to enable zero-touch deployment on Linux edge nodes. - -#### Scenario: mTLS onboarding via CLI flags -- **WHEN** an operator runs `serviceradar-sysmon-checker --mtls --token <token> --host <core-or-bootstrap>` on a Linux edge host -- **THEN** sysmon SHALL download an mTLS bundle (CA, client cert/key, endpoints) from Core, install credentials to the configured cert directory, and start the gRPC server with mTLS enabled -- **AND** the poller SHALL accept connections from the checker because the client certificate chains to the trusted CA. - -#### Scenario: SPIRE-based onboarding via environment variables -- **WHEN** the `ONBOARDING_TOKEN` and `KV_ENDPOINT` environment variables are set -- **THEN** sysmon SHALL download the edge onboarding package from Core, configure SPIRE workload API credentials, generate a service config with SPIFFE mode enabled, and start the gRPC server using the SPIRE workload API for credentials -- **AND** the checker's SPIFFE ID SHALL match the identity assigned in the onboarding package. - -#### Scenario: Graceful fallback to manual configuration -- **WHEN** no onboarding token is provided via CLI or environment -- **THEN** sysmon SHALL load configuration from the specified config file path using the existing `--config` flag behavior without attempting edge onboarding. - -### Requirement: Rust edge onboarding crate provides reusable bootstrap logic -The project SHALL provide a Rust crate (`edge-onboarding`) that encapsulates edge onboarding logic for use by sysmon and other Rust-based checkers. - -#### Scenario: Token parsing -- **WHEN** a valid `edgepkg-v1:<base64url>` token is provided -- **THEN** the crate SHALL parse and validate the token, extracting package ID, download token, and optional Core API URL. - -#### Scenario: Package download from Core API -- **WHEN** `download_package()` is called with a valid token payload -- **THEN** the crate SHALL POST to `/api/admin/edge-packages/{id}/download?format=json` with the download token and return the package metadata, mTLS bundle, and/or SPIRE credentials. 
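The token format the Rust crate consumes is the same `edgepkg-v1:` scheme used by the Go `pkg/edgeonboarding` package. As a point of reference for the token-parsing requirement above, here is a minimal Go sketch of decoding such a token; the struct and function names, and the unpadded base64url assumption, are illustrative only and do not describe either implementation's actual API.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// tokenPayload mirrors the documented JSON body: package ID, download token,
// and an optional Core API URL.
type tokenPayload struct {
	Pkg string `json:"pkg"`
	DL  string `json:"dl"`
	API string `json:"api,omitempty"`
}

const tokenPrefix = "edgepkg-v1:"

// parseToken checks the edgepkg-v1 prefix and base64url-decodes the JSON payload.
// Assumes unpadded base64url; real tokens may require handling padding as well.
func parseToken(token string) (*tokenPayload, error) {
	if !strings.HasPrefix(token, tokenPrefix) {
		return nil, fmt.Errorf("unexpected token prefix")
	}
	raw, err := base64.RawURLEncoding.DecodeString(strings.TrimPrefix(token, tokenPrefix))
	if err != nil {
		return nil, fmt.Errorf("decode token payload: %w", err)
	}
	var p tokenPayload
	if err := json.Unmarshal(raw, &p); err != nil {
		return nil, fmt.Errorf("parse token payload: %w", err)
	}
	if p.Pkg == "" || p.DL == "" {
		return nil, fmt.Errorf("token missing package id or download token")
	}
	return &p, nil
}

func main() {
	// Example payload {"pkg":"demo","dl":"token"} encoded as base64url.
	payload, err := parseToken("edgepkg-v1:eyJwa2ciOiJkZW1vIiwiZGwiOiJ0b2tlbiJ9")
	if err != nil {
		fmt.Println("onboarding token rejected:", err)
		return
	}
	fmt.Printf("package=%s core=%s\n", payload.Pkg, payload.API)
}
```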
- -#### Scenario: mTLS credential installation -- **WHEN** an mTLS bundle is provided in the package response -- **THEN** the crate SHALL write CA cert, client cert, and client key to the specified directory with appropriate file permissions (0600 for keys, 0644 for certs). - -#### Scenario: Config generation -- **WHEN** onboarding succeeds -- **THEN** the crate SHALL generate a JSON config file compatible with the sysmon `Config` struct, including the security configuration derived from the onboarding package. - -### Requirement: Sysmon checker persists onboarding state for restart resilience -The sysmon checker SHALL detect and reuse previously onboarded credentials on restart, avoiding redundant token downloads. - -#### Scenario: Restart with existing credentials -- **GIVEN** sysmon was previously onboarded and credentials exist in the cert directory -- **WHEN** sysmon restarts without a new onboarding token -- **THEN** it SHALL detect the existing credentials and generated config, skip the onboarding download, and start using the persisted configuration. - -#### Scenario: Re-onboarding with new token -- **GIVEN** sysmon was previously onboarded -- **WHEN** a new onboarding token is provided via CLI or environment -- **THEN** sysmon SHALL download fresh credentials, overwrite the existing ones, and start with the new configuration. - -### Requirement: Deployment type detection for environment-specific behavior -The edge onboarding crate SHALL detect the deployment environment (Docker, Kubernetes, bare-metal) and adjust configuration paths and credential storage accordingly. - -#### Scenario: Docker environment detection -- **WHEN** the `/.dockerenv` file exists or `container=docker` environment variable is set -- **THEN** the crate SHALL detect deployment type as Docker and use container-appropriate paths for credential storage. - -#### Scenario: Kubernetes environment detection -- **WHEN** the `KUBERNETES_SERVICE_HOST` environment variable is set or `/var/run/secrets/kubernetes.io/serviceaccount/token` exists -- **THEN** the crate SHALL detect deployment type as Kubernetes and configure paths compatible with pod filesystem layouts. - -#### Scenario: Bare-metal fallback -- **WHEN** neither Docker nor Kubernetes indicators are present -- **THEN** the crate SHALL assume bare-metal deployment and use standard Linux paths (e.g., `/var/lib/serviceradar/sysmon/`). diff --git a/openspec/changes/integrate-sysmon-edge-onboarding/tasks.md b/openspec/changes/integrate-sysmon-edge-onboarding/tasks.md deleted file mode 100644 index 46ec734cd..000000000 --- a/openspec/changes/integrate-sysmon-edge-onboarding/tasks.md +++ /dev/null @@ -1,60 +0,0 @@ -## 1. Design & Decisions -- [x] 1.1 Finalize Rust crate scope: general-purpose vs sysmon-specific initially. - - Decision: Start sysmon-specific, can generalize later for other Rust checkers. -- [x] 1.2 Decide async vs sync HTTP for bootstrap phase (recommend sync for simplicity). - - Decision: Use `ureq` for blocking HTTP - simpler for one-time bootstrap at startup. -- [x] 1.3 Define storage paths and file permissions for credentials on Linux. - - Decision: `/var/lib/serviceradar/checker/` with 0644 for certs, 0600 for keys. -- [x] 1.4 Review Go edgeonboarding package for portable logic vs Go-specific implementation. - - Reviewed: Ported token parsing, mTLS bundle, deployment detection, config generation. - -## 2. Rust Edge Onboarding Crate (`rust/edge-onboarding`) -- [x] 2.1 Create crate structure with `Cargo.toml`, `src/lib.rs`, and module layout. 
-- [x] 2.2 Implement token parsing: `parse_token(token: &str) -> Result<TokenPayload>`. -- [x] 2.3 Implement package download: `download_package(payload: &TokenPayload) -> Result<Package>`. -- [x] 2.4 Implement mTLS bundle installation: `install_mtls_bundle(bundle: &MtlsBundle, path: &Path) -> Result<()>`. -- [x] 2.5 Implement SPIRE credential setup: `configure_spire(join_token: &str, bundle_pem: &str, path: &Path) -> Result<SpireConfig>`. - - Note: Basic support included; full SPIRE workload API integration deferred. -- [x] 2.6 Implement config generation: `generate_config(package: &Package, security: &SecurityConfig) -> Result<Config>`. -- [x] 2.7 Implement deployment detection: `detect_deployment() -> DeploymentType` (Docker, Kubernetes, bare-metal). -- [x] 2.8 Implement main entry point: `try_onboard(component_type: ComponentType) -> Result<Option<OnboardingResult>>`. -- [x] 2.9 Add unit tests for token parsing, config generation, and deployment detection. - - 14 tests passing. -- [x] 2.10 Add integration test with mock Core API responses. - - Basic tests included; full integration tests deferred to validation phase. - -## 3. Sysmon Checker Integration (`cmd/checkers/sysmon`) -- [x] 3.1 Add `edge-onboarding` crate as a dependency in `Cargo.toml`. -- [x] 3.2 Add CLI flags: `--mtls`, `--token <TOKEN>`, `--host <HOST>`, `--cert-dir <PATH>`. -- [x] 3.3 Implement mTLS bootstrap path in `main.rs` before config loading. -- [x] 3.4 Implement SPIRE/env-based bootstrap path (`ONBOARDING_TOKEN` + `KV_ENDPOINT`). -- [x] 3.5 Merge generated security config with base config when onboarding succeeds. -- [x] 3.6 Persist onboarding result for restart resilience (detect existing credentials). -- [x] 3.7 Update config validation to handle onboarding-generated configs. -- [x] 3.8 Add logging for onboarding steps (token received, package downloaded, certs installed). - -## 4. Documentation & Packaging -- [x] 4.1 Update `cmd/checkers/sysmon/README.md` with edge onboarding usage. -- [x] 4.2 Add environment variable documentation: `ONBOARDING_TOKEN`, `KV_ENDPOINT`, `CORE_API_URL`. -- [x] 4.3 Document mTLS CLI usage: `sysmon-checker --mtls --token <TOKEN> --host <HOST>`. -- [x] 4.4 Update Dockerfile to support onboarding environment variables. - - Added `ONBOARDING_TOKEN`, `CORE_API_URL`, `KV_ENDPOINT` env vars. - - Created `/var/lib/serviceradar/checker/` directories for certs and config. - - Updated CMD to auto-detect onboarding mode. - -## 5. Validation -- [x] 5.1 E2E: Start Compose stack, issue mTLS edge token, run sysmon checker with `--mtls`, verify mTLS connection to poller. - - Tested with Docker Compose mTLS stack. - - Created edge package via Core API. - - Downloaded bundle, installed certs (0644 for certs, 0600 for keys). - - Generated config at `/var/lib/serviceradar/checker/config/checker.json`. - - Started mTLS-enabled gRPC server successfully. -- [ ] 5.2 E2E: Issue SPIRE-based package, run sysmon with `ONBOARDING_TOKEN`, verify SPIFFE identity. - - Deferred: Requires SPIRE infrastructure not currently available in test environment. -- [x] 5.3 Restart resilience: Verify sysmon restarts successfully using persisted credentials. - - Verified: Checker starts with `--config /path/to/checker.json` using persisted certs. -- [x] 5.4 Negative test: Verify graceful fallback when token is invalid or expired. - - Tested invalid base64 token: Clear error message about decode failure. - - Tested invalid download token: 409 error surfaced with clear API error message. 
-- [ ] 5.5 Cross-platform: Verify onboarding works on both amd64 and arm64 Linux. - - Deferred: Tested on x86_64 Linux; ARM testing requires separate environment. diff --git a/openspec/changes/migrate-proton-timeseries-to-cnpg/proposal.md b/openspec/changes/migrate-proton-timeseries-to-cnpg/proposal.md deleted file mode 100644 index 9c98693b1..000000000 --- a/openspec/changes/migrate-proton-timeseries-to-cnpg/proposal.md +++ /dev/null @@ -1,30 +0,0 @@ -## Why -- Timeplus Proton is still the single source of truth for metrics, sweep data, unified devices, and the registry even though we already provisioned a CNPG cluster with TimescaleDB + Apache AGE; the Go control-plane (`pkg/db`, `pkg/registry`) therefore depends on the proprietary Proton driver on every hot path. -- The current schema leans on Proton-specific constructs (`Stream`, `versioned_kv`, materialized view fan-outs) to keep immutable streams synchronized, which added state machines and retry code in `pkg/db` plus matching complexity in the registry. -- Moving the telemetry footprint into CNPG reduces our dependency surface (one database to operate, standard tooling, built-in replication/backup), unlocks Timescale features (compression, continuous aggregates), and finally lets us keep registry state in ordinary transactional tables. -- We need a spec-driven plan before touching code because the migration affects every service that persists data, requires dual-write safeguards, and demands a clearly documented cutover/rollback strategy. - -## What Changes -- Introduce the `timeseries-storage` capability describing how Timescale hypertables and retention policies replace Proton TTL streams for metrics, sysmon, poller/service history, discovery assets, and sweep data. -- Replace the Proton driver with pgx-backed CNPG clients: add pooled Postgres connections, split reads/writes, and convert the SQL in `pkg/db` and `pkg/registry` to parameterized Postgres queries (using Timescale helpers where needed). -- Reimplement the unified device + registry persistence so that `registry.Manager` writes directly into relational tables with row-level version metadata rather than depending on Proton materialized views to merge the immutable device update stream. -- Provide a phased migration plan: bootstrap CNPG schema/migrations, backfill the latest 30 days of Proton data, add dual-write toggles to `cmd/core`, `cmd/sync`, and any producer services, and block the final read cutover on automated parity checks. -- Ship operational docs/runbooks that cover k8s secret updates (CNPG creds/DSN), monitoring for Timescale background jobs, and rollback instructions should CNPG fall behind or fail validation. - -## Scope -### In Scope -- Go code under `pkg/db`, `pkg/registry`, and any service packages that call them (core, poller, sync, datasvc writers) so they can talk to CNPG and dual-write during the transition. -- New CNPG migrations (SQL + Bazel targets), Timescale retention/compression policies, and any supporting tooling/scripts needed to copy historical Proton data into CNPG. -- Kubernetes manifests/Helm values for pointing workloads at the CNPG timeseries endpoint plus secrets/configuration settings that enable/disable dual writes and cutover. -- Documentation/runbooks describing the migration steps, validation commands (device counts, metric probes), and rollback workflow. - -### Out of Scope -- SRQL translator/query engine changes (the OCaml service will continue emitting Proton SQL until we spec that migration separately). 
-- New graph workloads leveraging Apache AGE—the extension must remain installed, but no graph schema or queries ships in this change. -- UI/UX redesigns or analytics-layer feature work; only config updates required to hit the new APIs/clusters are allowed. - -## Impact -- Replaces a foundational data store, so services will need configuration reloads and likely short maintenance windows during cutover/backfill. -- Requires new operational expertise: Timescale background workers, retention jobs, and CNPG backup/restore now become part of the standard on-call checklist. -- Dual writes temporarily increase resource usage (both Proton and CNPG ingest traffic) until we switch all reads to CNPG and decommission Proton. -- Any latent assumptions about Proton-specific SQL syntax (`table()`, `_tp_time`) will break; we must audit metrics/registry code paths to ensure they have Postgres equivalents or add compatibility layers. diff --git a/openspec/changes/migrate-proton-timeseries-to-cnpg/schema-mapping.md b/openspec/changes/migrate-proton-timeseries-to-cnpg/schema-mapping.md deleted file mode 100644 index 1badb13c6..000000000 --- a/openspec/changes/migrate-proton-timeseries-to-cnpg/schema-mapping.md +++ /dev/null @@ -1,90 +0,0 @@ -# Proton → CNPG schema mapping - -This document fulfills task **1.1** by enumerating every Proton stream/table that `pkg/db` and the registry rely on today and describing how each structure maps to the CNPG + Timescale schema introduced in `pkg/db/cnpg/migrations/00000000000001_timescale_schema.up.sql`. Each row lists the current Proton TTL window (taken directly from `pkg/db/migrations/*.sql`), the CNPG target object, its retention policy, and the implementation status so we can track any gaps that still need follow-up migrations. - -## Legend - -- **Existing** – implemented in `00000000000001_timescale_schema.up.sql`. -- **Planned** – needs an additional CNPG migration before we can cut over writes. -- **Merged/Dropped** – Proton table will be retired and the responsibility moves into another CNPG structure. -- **Deferred** – explicitly out of scope for this change (see proposal scope/out-of-scope); stays on Proton until the follow-on spec lands. - -## Telemetry & sysmon streams (3-day TTL in Proton) - -| Proton object | Proton TTL | CNPG target | CNPG retention | Status | Notes | -| --- | --- | --- | --- | --- | --- | -| `timeseries_metrics` | 3 days (`timestamp`/`_tp_time`) | `timeseries_metrics` Timescale hypertable (`timestamp`) | Timescale retention policy: 3 days | Existing | Field-for-field port; JSON `tags` and `metadata` become JSONB, plus device/time index (`idx_timeseries_metrics_device_time`). | -| `cpu_metrics` | 3 days | `cpu_metrics` hypertable (`timestamp`) | 3 days | Existing | Includes new columns from migration `00000000000002` (label, cluster) and device/time index. | -| `cpu_cluster_metrics` | 3 days | `cpu_cluster_metrics` hypertable (`timestamp`) | 3 days | Existing | Mirrors Proton schema and keeps hourly partitioning via Timescale. | -| `disk_metrics` | 3 days | `disk_metrics` hypertable (`timestamp`) | 3 days | Existing | Same schema; JSON-less so simple Timescale table. | -| `memory_metrics` | 3 days | `memory_metrics` hypertable (`timestamp`) | 3 days | Existing | Same as Proton. | -| `process_metrics` | 3 days | `process_metrics` hypertable (`timestamp`) | 3 days | Existing | Gains `created_at` column; retains host/poller/device indexes. 
| -| `netflow_metrics` | 3 days | `netflow_metrics` hypertable (`timestamp`) | 3 days | Existing | Schema normalized (JSONB metadata) plus retention policy. | -| `rperf_metrics` | 3 days | `rperf_metrics` hypertable (`timestamp`) | 3 days | Existing | `00000000000002_events_rperf_users.up.sql` provisions the table + retention so the Rust rperf writers can dual-write. | -| `device_metrics_summary` (MV) | 3 days | Timescale continuous aggregate (e.g., `device_metrics_summary_cagg`) fed from CPU/disk/memory hypertables | 3 days | Existing | `00000000000003_device_metrics_summary_cagg.up.sql` implements a 5-minute continuous aggregate that mirrors the Proton MV semantics. | -| `service_status` | 3 days | `service_status` hypertable (`timestamp`) | 3 days | Existing | All service heartbeat writes/readers move here via pgx; `created_at` is populated automatically. | -| `service_statuses` | 3 days | Fold into `service_status` hypertable | 3 days | Merged/Dropped | Proton kept both `service_status` and `service_statuses`; CNPG keeps a single hypertable and the dedup logic happens in SQL instead of two streams. | -| `services` | 3 days | `services` hypertable (`timestamp`) | 30 days | Existing | We intentionally extend retention to 30 days to ease config auditing; Timescale handles TTL. | - -## Discovery, sweep, and topology streams - -| Proton object | Proton TTL | CNPG target | CNPG retention | Status | Notes | -| --- | --- | --- | --- | --- | --- | -| `sweep_host_states` | 3 days (`last_sweep_time`) | `sweep_host_states` hypertable (`last_sweep_time`) | 3 days | Existing | Hypertable primary key `(host_ip, poller_id, partition)` matches Proton `versioned_kv` semantics. | -| `device_updates` | 3 days (`timestamp`) | `device_updates` hypertable (`observed_at`) | 3 days | Existing | Column rename clarifies semantics; used for deterministic merge inside `pkg/db/cnpg_unified_devices`. | -| `discovered_interfaces` | 3 days | `discovered_interfaces` hypertable (`timestamp`) | 3 days | Existing | Arrays become typed columns (`TEXT[]`), metadata becomes JSONB. | -| `topology_discovery_events` | 3 days | `topology_discovery_events` hypertable (`timestamp`) | 3 days | Existing | One-to-one port; still the backing store for topology replay APIs. | - -## Device inventory & registry tables - -| Proton object | Proton TTL | CNPG target | CNPG retention | Status | Notes | -| --- | --- | --- | --- | --- | --- | -| `unified_devices` | 3 days (should have been 30) | `unified_devices` relational table (PK `device_id`) | No TTL (managed by `last_seen` pruning jobs) | Existing | CNPG stores canonical rows directly, removing Proton `versioned_kv` + MV pipeline. | -| `unified_devices_registry` | 3 days | Covered by `unified_devices` + new registry write path | N/A | Merged/Dropped | Proton dual-stream is replaced with a single table plus explicit merge logic in `pkg/registry`. | -| `pollers` | 3 days TTL around `last_seen` | `pollers` relational table | No TTL | Existing | CNPG keeps full registry metadata plus counters, matching `pkg/db/pollers.go` expectations. | -| `agents` | No TTL | `agents` relational table | No TTL | Existing | Same column set as Proton stream; indexes on `poller_id`. | -| `checkers` | No TTL | `checkers` relational table | No TTL | Existing | Same schema; we retain `poller_id` column for lookups. | -| `service_registration_events` | 90 days | `service_registration_events` hypertable (`timestamp`) | 90 days | Existing | The append-only audit log retains its TTL via Timescale retention. 
| -| `poller_history` | 7 days | `poller_history` hypertable (`timestamp`) | 7 days | Existing | API consumers keep the same dataset while moving to pgx. | -| `poller_statuses` | 7 days | Use `poller_history` for status deltas; add SQL view if callers need snapshots | 7 days | Merged/Dropped | No Go code queries `poller_statuses`; we stop writing it once dual-writes to CNPG begin. | -| `service_status`/`service_statuses` | 3 days | `service_status` hypertable | 3 days | Existing/Merged | Covered above; registry consumers will hit pgx. | -| `services` | 3 days | `services` hypertable | 30 days | Existing | Same as telemetry table above; tracked here for registry completeness. | -| `events` | 3 days (`event_timestamp`) | `events` hypertable (`event_timestamp`) | 3 days | Existing | Implemented in `00000000000002_events_rperf_users.up.sql`; CNPG now stores CloudEvents with 3-day retention so the db-event-writer can dual-write. | -| `users` | No TTL | `users` relational table (PK `id`, unique `email`) | No TTL | Existing | `00000000000002_events_rperf_users.up.sql` adds the auth/users table with lowercased unique indexes to match Proton behavior. | - -## Edge onboarding & capabilities - -| Proton object | Proton TTL | CNPG target | CNPG retention | Status | Notes | -| --- | --- | --- | --- | --- | --- | -| `edge_onboarding_packages` | No TTL (`versioned_kv`) | `edge_onboarding_packages` relational table | No TTL | Existing | Exact schema parity; row versioning handled with `updated_at` column. | -| `edge_onboarding_events` | None (append-only MergeTree) | `edge_onboarding_events` hypertable (`event_time`) | 365 days | Existing | Timescale handles long-lived audit retention without manual TTL jobs. | -| `device_capabilities` | 90 days | `device_capabilities` hypertable (`last_checked`) | 90 days | Existing | Append-only audit table plus indexes on `(device_id, capability, service_id)`. | -| `device_capability_registry` | No TTL | `device_capability_registry` relational table | No TTL | Existing | Maintains “latest state” rows; updated through pgx upserts. | - -## Observability (SRQL-dependent) streams – deferred - -| Proton object | Proton TTL | CNPG target | CNPG retention | Status | Notes | -| --- | --- | --- | --- | --- | --- | -| `logs` | 3 days | `logs` hypertable | 3 days | Existing | Added in `00000000000004_otel_observability.up.sql`; `cmd/consumers/db-event-writer` now writes OTEL logs via pgx. | -| `otel_metrics` | 3 days | `otel_metrics` hypertable | 3 days | Existing | Same migration as above; fed exclusively by the CNPG-backed db-event-writer. | -| `otel_traces` | 3 days | `otel_traces` hypertable | 3 days | Existing | Hypertable + ingestion wiring landed with the observability migration so gRPC traces bypass Proton. | -| `otel_trace_summaries` | 3 days | `otel_trace_summaries` hypertable + views | 3 days | Deferred | Dependent on SRQL translator plans. | -| `otel_spans_enriched` | 3 days | `otel_spans_enriched` hypertable + pipeline | 3 days | Deferred | Remains on Proton until SRQL is ported. | -| `ocsf_device_inventory` | 30 days | Timescale table (one row per inventory event) | 30 days | Deferred | OCSF exports are not part of the timeseries-storage capability; Proton implementation continues until the OCSF alignment roadmap item is picked up. | -| `ocsf_network_activity` | 3 days | Timescale hypertable | 3 days | Deferred | Same reasoning as above. 
| -| `ocsf_user_inventory` | 90 days | Timescale table | 90 days | Deferred | 90-day retention stays Proton-only until we spec Postgres views. | -| `ocsf_system_activity` | 7 days | Timescale hypertable | 7 days | Deferred | Blocked on OCSF exporter work. | -| `ocsf_devices_current` | 90 days | Relational table | 90 days | Deferred | Derived from inventory events; will move once OCSF spec exists. | -| `ocsf_users_current` | 90 days | Relational table | 90 days | Deferred | Same as devices_current. | -| `ocsf_vulnerabilities_current` | 365 days | Relational table | 365 days | Deferred | Requires dedicated retention + compliance review. | -| `ocsf_services_current` | 90 days | Relational table | 90 days | Deferred | Covered by OCSF spec later. | -| `ocsf_observable_index` | 30 days | Hypertable/table | 30 days | Deferred | Observability search indexes stay in Proton until Postgres search plan exists. | -| `ocsf_observable_statistics` | 90 days | Hypertable/table | 90 days | Deferred | Same as above. | -| `ocsf_entity_relationships` | 30 days | Hypertable/table | 30 days | Deferred | Depends on graph/AGE rollout; intentionally out of scope. | -| `ocsf_search_performance` | 7 days | Hypertable/table | 7 days | Deferred | Remains Proton-backed; will be moved with analytics observability work. | - -## What’s next - -- Wire the Go writers/readers (metrics, events, auth) to the new CNPG tables so we can enable dual writes and parity checks. -- Keep Proton-only observability/OCSF datasets documented here so future specs can reference their TTL expectations. -- Update `pkg/registry` implementation work to rely on the relational tables described above (Tasks 2.2–2.4). diff --git a/openspec/changes/migrate-proton-timeseries-to-cnpg/specs/timeseries-storage/spec.md b/openspec/changes/migrate-proton-timeseries-to-cnpg/specs/timeseries-storage/spec.md deleted file mode 100644 index 299dce3a2..000000000 --- a/openspec/changes/migrate-proton-timeseries-to-cnpg/specs/timeseries-storage/spec.md +++ /dev/null @@ -1,54 +0,0 @@ -## ADDED Requirements -### Requirement: Timescale hypertables replace Proton telemetry streams -ServiceRadar MUST store every metric, poller/service heartbeat, and sweep/discovery event that currently lives in Timeplus Proton inside the CNPG cluster as Timescale hypertables with retention policies that mirror the Proton TTL rules. - -#### Scenario: Telemetry writes hit CNPG -- **GIVEN** `cmd/core` is configured with a CNPG DSN and the TimescaleDB extension is enabled -- **WHEN** `pkg/db.StoreMetrics`, `StoreSysmonMetrics`, `StoreNetflowMetrics`, or `PublishTopologyDiscoveryEvent` is invoked -- **THEN** the rows are inserted into the matching hypertable (`timeseries_metrics`, `cpu_metrics`, `disk_metrics`, `process_metrics`, `topology_discovery_events`, etc.), `time_bucket`/`created_at` columns are populated, and the insert succeeds without touching the Proton driver. - -#### Scenario: Retention windows are enforced -- **GIVEN** the CNPG cluster is running the new schema and Timescale background jobs -- **WHEN** metrics older than 3 days, discovery artifacts older than 7 days, or registry-support tables older than 30 days are present -- **THEN** the Timescale retention policies delete them on schedule so disk usage matches the existing Proton TTL behavior. 
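For illustration only, a minimal sketch of how one of these retention windows could be expressed when a migration step is applied through pgx. The table, columns, and connection string below are placeholders; the real DDL lives in the embedded CNPG migration files and may differ.

```go
// Illustrative migration step: a Proton 3-day TTL stream mapped onto a
// Timescale hypertable with a matching retention policy. Names are placeholders.
package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()

	pool, err := pgxpool.New(ctx, "postgres://serviceradar@cnpg:5432/serviceradar")
	if err != nil {
		log.Fatalf("connect cnpg: %v", err)
	}
	defer pool.Close()

	stmts := []string{
		// Ordinary relational table first; TimescaleDB turns it into a hypertable.
		`CREATE TABLE IF NOT EXISTS timeseries_metrics (
			poller_id  TEXT        NOT NULL,
			device_id  TEXT,
			name       TEXT        NOT NULL,
			value      DOUBLE PRECISION,
			tags       JSONB,
			timestamp  TIMESTAMPTZ NOT NULL
		)`,
		// Partition by the timestamp column, mirroring the time-ordered Proton stream.
		`SELECT create_hypertable('timeseries_metrics', 'timestamp', if_not_exists => TRUE)`,
		// The retention policy replaces the Proton TTL: drop chunks older than 3 days.
		`SELECT add_retention_policy('timeseries_metrics', INTERVAL '3 days', if_not_exists => TRUE)`,
	}

	for _, stmt := range stmts {
		if _, err := pool.Exec(ctx, stmt); err != nil {
			log.Fatalf("apply migration step: %v", err)
		}
	}
}
```

The point of the sketch is that the TTL semantics become a declarative policy enforced by Timescale background workers instead of application-side cleanup code.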
- -#### Scenario: Read paths only use pgx -- **GIVEN** an API (e.g., `/api/metrics`, `/api/pollers`, `/api/sysmon`) that previously queried Proton through `github.com/timeplus-io/proton-go-driver` -- **WHEN** the handler runs -- **THEN** it executes the new SQL (window functions, continuous aggregates, or `time_bucket_gapfill`) through the shared pgx pool and never instantiates a Proton connection. - -### Requirement: Postgres-native unified device and registry tables -ServiceRadar MUST maintain canonical device inventory, poller/agent/checker registries, and the associated materialized state in ordinary Postgres tables with row-level version metadata instead of relying on Proton’s immutable streams. - -#### Scenario: Device updates converge deterministically -- **GIVEN** the registry manager receives a `models.DeviceUpdate` -- **WHEN** it calls the new Postgres-backed persistence layer -- **THEN** the update is appended to `device_updates_log`, merged into `unified_devices_current` via `INSERT ... ON CONFLICT`, and the `first_seen`/`last_seen`/metadata columns match the previous Proton MV semantics. - -#### Scenario: Registry queries keep working -- **GIVEN** a CLI or API request hits `pkg/registry.ServiceRegistry.GetPoller`/`ListPollers` -- **WHEN** the request executes -- **THEN** it reads from the Postgres tables (`pollers`, `agents`, `checkers`, `service_status` history) and returns the same shape/results that the Proton-backed queries produced. - -#### Scenario: Unified devices stay in sync with SRQL-unaware consumers -- **GIVEN** `/api/devices` or `/api/device/<id>` runs without SRQL involvement -- **WHEN** the request runs against the CNPG-backed implementation -- **THEN** the device count, pagination cursors, and metadata flags (deleted/merged markers) match what Proton returned for the same dataset. - -### Requirement: Proton-free runtime -ServiceRadar MUST operate exclusively against CNPG/Timescale and remove every dependency on the Proton driver, schema artifacts, and operational tooling. - -#### Scenario: Services boot without Proton configuration -- **GIVEN** a fresh deployment of `cmd/core`, `cmd/db-event-writer`, and the supporting Go binaries -- **WHEN** they load configuration -- **THEN** only CNPG DSNs/TLS files are required, Proton connection settings are ignored or deleted, and the Go services fail fast if CNPG is unavailable rather than attempting to dial Proton. - -#### Scenario: Codebase no longer references Proton helpers -- **GIVEN** the repository is built after this change lands -- **WHEN** developers search for `timeplus-io/proton-go-driver` (or Proton-specific SQL such as `table(...)`, `_tp_time`, `FINAL`) -- **THEN** no references remain under `pkg/`, `cmd/`, or `scripts/`, and all persistence layer tests exercise the pgx-backed CNPG implementation. - -#### Scenario: Operational docs mention CNPG only -- **GIVEN** an operator follows the updated runbooks -- **WHEN** they read `docs/docs/agents.md`, `docs/docs/runbooks`, or the demo cluster guides -- **THEN** every instruction references CNPG (migrations, health checks, resets), and Proton-specific guidance (PVC resets, dual-write toggles, rollback commands) has been removed or replaced with CNPG equivalents. diff --git a/openspec/changes/migrate-proton-timeseries-to-cnpg/tasks.md b/openspec/changes/migrate-proton-timeseries-to-cnpg/tasks.md deleted file mode 100644 index 4e1a7e0a1..000000000 --- a/openspec/changes/migrate-proton-timeseries-to-cnpg/tasks.md +++ /dev/null @@ -1,31 +0,0 @@ -## 1. 
CNPG + Timescale schema -- [x] 1.1 Inventory the current Proton schema (`pkg/db/migrations/*.sql`, registry queries) and produce a mapping doc that calls out table-by-table how it becomes a Postgres table or Timescale hypertable (including TTL/retention windows). *(See `openspec/changes/migrate-proton-timeseries-to-cnpg/schema-mapping.md`.)* -- [x] 1.2 Author the new CNPG migrations: create hypertables for metrics/sysmon/netflow/discovery, relational tables for unified devices/registry/edge onboarding/users, seed indexes, and register Timescale retention/compression policies that match the TTL plan. *(Initial `00000000000001_timescale_schema.up.sql` covers the telemetry, registry, onboarding, and capability tables; `00000000000002_events_rperf_users.up.sql` and `00000000000003_device_metrics_summary_cagg.up.sql` add CloudEvents/rperf/users plus the device metrics continuous aggregate.)* -- [x] 1.3 Wire the migrations into Bazel/`make` (similar to existing Proton migrations) and ensure they can be applied against a clean CNPG cluster plus the already-provisioned demo cluster. *(New `cmd/tools/cnpg-migrate` binary + `make cnpg-migrate` target run the embedded migrations via Go or Bazel.)* -- [x] 1.4 Confirm the CNPG image still exposes TimescaleDB + Apache AGE extensions and document the SQL required to `CREATE EXTENSION` + verify them in the telemetry database. *(`docs/docs/agents.md` now covers the telemetry `psql` commands that create and verify both extensions.)* - -## 2. Go data layer rewrite -- [x] 2.1 Introduce a shared pgx-based CNPG client (connection pooling, TLS) inside `pkg/db` and update service configuration structs to carry the new DSNs/flags needed for Proton+CNPG dual writes. *(`pkg/models` gained CNPG + routing config, and `pkg/db` now spins up the pgx pool with dual-write helpers.)* -- [x] 2.2 Port write paths (`StoreMetrics`, `StoreSysmonMetrics`, `StoreSweepHostStates`, `PublishDeviceUpdate`, edge onboarding, auth/users, etc.) to Postgres SQL; add unit tests for the new query builders. *(`pkg/db/auth.go` moved to pgx, CloudEvents land in CNPG via `pkg/db/events.go`, metrics/sysmon/sweep via `pkg/db/cnpg_metrics.go` + `pkg/db/cnpg_sweep.go`, device updates through `pkg/db/cnpg_unified_devices.go`, edge onboarding via `pkg/db/cnpg_edge_onboarding.go`, and the poller/service registry writers now dual-write through `pkg/db/cnpg_registry.go` with coverage in the new unit tests.)* -- [x] 2.3 Port read paths for metrics, discovery, devices, and registry lookups to Postgres, replacing Proton-specific constructs like `table(...)`, `_tp_time`, and `FINAL` with Timescale/SQL equivalents. *Poller/service registry APIs now branch on `StorageRouting` and hit the CNPG tables (`pollers`, `poller_history`, `service_status`, `services`) via pgx, and the device APIs reuse the CNPG-backed unified device helpers. This batch adds CNPG-backed readers for every metrics and sweep/discovery call site—`pkg/db/metrics.go`, `pkg/db/snmp_status.go`, and `pkg/db/sweep.go` now fan out to the pgx helpers in `pkg/db/cnpg_metrics_reads.go`/`pkg/db/cnpg_sweep.go`, with coverage in `pkg/db/cnpg_metrics_reads_test.go` and `pkg/db/cnpg_sweep_read_test.go`. Proton-only reads are no longer required once `StorageRouting.PrimaryBackend` flips to CNPG.* -- [x] 2.4 Update `pkg/registry` to operate on the new relational tables (explicit merging logic for unified devices, registry tables, service counts) and cover the behavior with updated tests or fixtures. 
*DeviceRegistry's identifier lookups (Armis IDs, NetBox IDs, MACs, and canonical IPs) now issue parameterized CNPG queries via the new helpers in `pkg/registry/registry.go` with coverage in `pkg/registry/registry_cnpg_test.go`, and the ServiceRegistry poller/agent/checker readers/writers now target the CNPG tables with dual-write support plus new coverage in `pkg/registry/service_registry_queries_test.go`, so both registry paths run on Timescale.* - -## 3. Proton driver removal -- [x] 3.1 Delete the Proton client/connection from `pkg/db` (no `Conn`, `NewStreamingConn`, or dual-write routing) so CNPG migrations and pgx pools are the only database initialization path. -- [x] 3.2 Rewrite the remaining Proton-backed code in `pkg/db` (`metrics.go`, `sweep.go`, `discovery.go`, `devices.go`, `services.go`, `pollers.go`, `netflow.go`, `edge_onboarding.go`, `capabilities.go`) so every read/write uses the CNPG helpers and no Proton SQL (`table(...)`, `_tp_time`, `FINAL`) remains. -- [x] 3.3 Remove Proton mocks/tests/build rules (`executeBatch`, Proton migration embeds, mock generators tied to `driver.Batch`) and update the db unit tests to run solely against the pgx-backed CNPG code. - -## 4. Service + tooling cleanup -- [x] 4.1 Convert `cmd/consumers/db-event-writer` to read/write CNPG tables for OTEL logs/metrics/traces, removing Proton config flags, schema assumptions, and driver dependencies. -- [x] 4.2 Replace Proton streaming utilities (core log digest tailer, stats aggregator parity checks, CLI helpers) with CNPG implementations or retire the features if they were Proton-only. -- [x] 4.3 Update CLI/config/installer tooling so Proton settings (`DBAddr`, Proton TLS certs, migration scripts) are removed, CNPG DSNs are required, and generated configs/scripts reference the Timescale schema exclusively. *(Dropped the Proton override fields from `pkg/models` + the admin UI, rewired the CLI TLS generator and install/setup scripts to emit/consume CNPG-only certs, forced the config loaders/UI to require the CNPG DSN fields, and updated the demo Kustomize overlays + `deploy.sh` so they render CNPG/SPFFE-aware configmaps without any Proton placeholders.)* - -## 5. Documentation + operations -- [x] 5.1 Purge Proton references from docs and runbooks (`docs/docs/agents.md`, runbook directory, demo README) and add CNPG-only operational procedures (migrations, resets, health checks). -- [x] 5.2 Remove Proton deployments/manifests/Helm values from `k8s/` and replace them with CNPG-only overlays (including any reset scripts or PVC instructions). -- [x] 5.3 Update scripts/automation (e.g., `reset-proton.sh`, backfill helpers, CI pipelines) to either target CNPG or be deleted if obsolete, ensuring `make`/Bazel targets no longer expect Proton artifacts. - -## 6. Validation + observability -- [x] 6.1 Add CNPG-focused smoke/integration tests that cover `/api/devices`, `/api/metrics`, `/api/registry`, and the db-event-writer ingestion path so we can prove the Proton-free stack works end-to-end. *(`scripts/cnpg-smoke.sh` hits the device + registry APIs, queries CNPG metrics, and exercises the db-event-writer path against the demo-staging namespace.)* -- [x] 6.2 Refresh monitoring/dashboards to track CNPG ingestion/retention health (Timescale hypertables, retention jobs, pgx error metrics) and remove Proton parity dashboards/checks. 
*Added the new `docs/docs/cnpg-monitoring.md` dashboard cookbook, cross-referenced it from `docs/docs/agents.md`, and rewrote the OTEL/SNMP/Syslog/architecture/service-port-map/cluster docs so every “where does this land?” answer now points at CNPG instead of Proton.* -- [x] 6.3 Debug demo-staging `serviceradar-db-event-writer` duplicate key errors by adding `ON CONFLICT DO NOTHING` to the CNPG OTEL insert helpers, rebuilding/pushing `ghcr.io/carverauto/serviceradar-db-event-writer:sha-84d6976bf1ab3bc8e515e178393a1e3603c17ce2`, and rolling the deployment so logs show successful `rows_processed` entries instead of `*_logs_pkey` violations. diff --git a/openspec/changes/remove-identity-kv-cache/design.md b/openspec/changes/remove-identity-kv-cache/design.md new file mode 100644 index 000000000..49e228761 --- /dev/null +++ b/openspec/changes/remove-identity-kv-cache/design.md @@ -0,0 +1,13 @@ +# Design Notes: Removing KV from identity paths + +## Principle +CNPG is the source of truth for identity resolution and canonicalization. KV should not participate in identity correctness or be required for identity hot paths. + +## Expected Result +- Identity lookups remain correct when datasvc/NATS KV is unavailable. +- No new `device_canonical_map/*` keys are created in the KV bucket during normal operation. +- Any remaining identity KV entries in existing environments are treated as legacy cache artifacts. + +## Risks / Mitigations +- **Increased DB load on cold start**: mitigate via registry hydration on startup and in-memory cache TTL tuning. +- **Operational confusion**: docs explicitly state KV is not used for identity. diff --git a/openspec/changes/remove-identity-kv-cache/proposal.md b/openspec/changes/remove-identity-kv-cache/proposal.md new file mode 100644 index 000000000..9a5f8724d --- /dev/null +++ b/openspec/changes/remove-identity-kv-cache/proposal.md @@ -0,0 +1,28 @@ +# Change: Remove KV identity cache/hydration from core + +## Why +We want KV to be used for configuration (and other non-identity workflows like edge onboarding), not for device identity canonicalization or caching. + +Although the KV-backed identity map **publisher** has been removed/disabled, the core service still performs **KV identity caching/hydration**: +- The demo cluster’s `serviceradar-datasvc` KV bucket currently contains `100,000` `device_canonical_map/*` entries (50k `ip/*` + 50k `partition-ip/*`), showing the core is actively writing identity records into KV. + - This is not “100k devices”; it is ~2 keys per device/IP (`ip/*` + `partition-ip/*`). With IP churn or multi-IP devices, the key count can exceed `2 * device_count` within the TTL window. +- This makes identity behavior harder to reason about (“are we using KV or not?”), and creates an implicit dependency on datasvc/NATS for identity hot paths. + - At scale (e.g., 2M devices), even `2 * N` keys is millions of KV entries competing with config data in the same bucket. + +This change removes KV from the identity path entirely so identity resolution is always CNPG + in-memory caches, while KV remains for configs/edge workflows. + +## What Changes +- Core no longer reads KV for identity lookup or sweep canonicalization. +- Core no longer hydrates identity mappings into KV (`device_canonical_map/*`). +- `GetCanonicalDevice` continues to exist but becomes CNPG-only (no KV-first lookup, no KV hydration side effects). +- Documentation and operational guidance are updated to reflect that KV is not part of the identity path. 
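A minimal sketch of the CNPG-only lookup shape this change describes: the in-memory cache is checked first, then CNPG, and no KV client is consulted or hydrated. The function name, cache interface, and table/column names are illustrative assumptions, not the actual `pkg/core` code.

```go
package identity

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5/pgxpool"
)

// Cache is a stand-in for whatever TTL cache core already keeps in memory.
type Cache interface {
	Get(key string) (string, bool)
	Set(key, value string)
}

// CanonicalDeviceID resolves partition+IP to a canonical device ID using only
// the in-memory cache and CNPG; no device_canonical_map/* keys are read or written.
func CanonicalDeviceID(ctx context.Context, pool *pgxpool.Pool, cache Cache, partition, ip string) (string, error) {
	key := partition + ":" + ip
	if id, ok := cache.Get(key); ok {
		return id, nil
	}

	// CNPG is authoritative; illustrative table/column names.
	const q = `
		SELECT device_id
		FROM unified_devices
		WHERE partition = $1 AND ip = $2
		LIMIT 1`

	var id string
	if err := pool.QueryRow(ctx, q, partition, ip).Scan(&id); err != nil {
		return "", fmt.Errorf("canonical lookup %s: %w", key, err)
	}

	cache.Set(key, id) // only the in-memory cache is populated
	return id, nil
}
```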
+ +## Impact +- Affected specs: `device-identity-reconciliation` +- Affected code (expected): + - `pkg/core/identity_lookup.go` (remove KV-first + hydration) + - `pkg/core/result_processor.go` (remove KV identity cache reads/writes) + - `pkg/core/server.go` (stop wiring KV client into identity paths) + - `cmd/tools/kv-sweep` (deprecate or remove identity-specific behaviors, if appropriate) +- Performance/behavior: increased CNPG/in-memory reliance during cache misses; reduced datasvc/NATS dependency; removes identity KV growth. +- Migration: existing `device_canonical_map/*` keys become legacy/unused and can be cleaned up separately (optional). diff --git a/openspec/changes/remove-identity-kv-cache/specs/device-identity-reconciliation/spec.md b/openspec/changes/remove-identity-kv-cache/specs/device-identity-reconciliation/spec.md new file mode 100644 index 000000000..b6742f8a8 --- /dev/null +++ b/openspec/changes/remove-identity-kv-cache/specs/device-identity-reconciliation/spec.md @@ -0,0 +1,14 @@ +## MODIFIED Requirements + +### Requirement: CNPG-Authoritative Identity Canonicalization +The system SHALL treat CNPG as the authoritative source of canonical device identity and SHALL NOT read or write KV (`device_canonical_map/*`) as part of identity resolution or sweep canonicalization. + +#### Scenario: Identity works without KV +- **GIVEN** the datasvc/NATS KV system is unavailable +- **WHEN** the core processes sweep results or resolves canonical device identities +- **THEN** identity resolution continues using CNPG-backed paths and in-memory caches +- **AND** no KV identity reads or writes are required for correctness + +#### Scenario: No identity KV entries are created +- **WHEN** the core runs under normal operation +- **THEN** it does not create or update `device_canonical_map/*` keys in KV diff --git a/openspec/changes/remove-identity-kv-cache/tasks.md b/openspec/changes/remove-identity-kv-cache/tasks.md new file mode 100644 index 000000000..f3ab00b65 --- /dev/null +++ b/openspec/changes/remove-identity-kv-cache/tasks.md @@ -0,0 +1,23 @@ +## 1. Confirm Current Behavior (Demo + Code) +- [ ] 1.1 Capture current demo KV identity key counts by prefix and kind (`device_canonical_map/ip/*`, `device_canonical_map/partition-ip/*`) +- [ ] 1.2 Identify all runtime code paths that read/write `device_canonical_map/*` (lookup + hydration) +- [ ] 1.3 Confirm no other services rely on identity KV keys (only core) +- [ ] 1.4 Explain key cardinality (e.g., `2 * unique_ips` in TTL window) and validate against CNPG device/IP counts in demo + +## 2. Remove Identity KV from Core Lookups +- [ ] 2.1 Update `GetCanonicalDevice` to skip KV and resolve via CNPG-backed paths only +- [ ] 2.2 Remove identity KV hydration (`hydrateIdentityKV`) or make it unreachable +- [ ] 2.3 Ensure OTEL lookup metrics still make sense without KV (`resolved_via` labels) + +## 3. Remove Identity KV from Sweep Canonicalization +- [ ] 3.1 Remove KV read step in sweep canonicalization (`fetchCanonicalSnapshotsFromKV`) +- [ ] 3.2 Remove KV hydration for sweep snapshots (`persistIdentityForSnapshot`) +- [ ] 3.3 Ensure fallback order remains correct and efficient (in-memory cache → registry → CNPG) + +## 4. Cleanup / Tooling +- [ ] 4.1 Evaluate `cmd/tools/kv-sweep` usage and either deprecate identity modes or remove them +- [ ] 4.2 Add a short runbook note for optional manual cleanup of legacy `device_canonical_map/*` keys (if retained) + +## 5. 
Docs + Validation +- [ ] 5.1 Update `docs/docs/architecture.md` to remove KV identity cache assumptions +- [ ] 5.2 Run `go test ./...` and `openspec validate remove-identity-kv-cache --strict` diff --git a/openspec/changes/remove-kv-canonical-identity-map/design.md b/openspec/changes/remove-kv-canonical-identity-map/design.md new file mode 100644 index 000000000..c4cdae8e0 --- /dev/null +++ b/openspec/changes/remove-kv-canonical-identity-map/design.md @@ -0,0 +1,19 @@ +# Design Notes: KV canonical identity map cleanup + +## Current State (Observed) +- Core initializes a KV client for features that still require it (edge onboarding, identity lookups), but explicitly states the KV identity map **publisher** is disabled. +- Registry canonicalization is CNPG-backed via `IdentityEngine` and persisted in `unified_devices` + `device_identifiers`. +- Core may still read/hydrate limited identity keys in KV as an optimization during sweep processing and `GetCanonicalDevice` requests, but this is cache-only and must degrade gracefully. +- Sync integrations include legacy code to resolve canonical identities via KV, but now intentionally avoid KV reads because canonical resolution happens centrally. + +## Goal +Align the codebase and docs with the current architecture by removing the unused publisher and any unreachable KV canonical-map code paths, while keeping the KV features that are still active (edge onboarding, config/seeding, cache-only sweep hydration). + +## Non-Goals +- Changing canonical identity rules or introducing new identity kinds. +- Removing datasvc/KV as a product dependency (KV is still required for configuration and edge onboarding). +- Migrating existing customer KV buckets in this change (if any exist, provide an operational note, not an automated migration). + +## Compatibility / Migration Considerations +- If any environments still have canonical identity buckets from older versions, ensure they can be safely ignored or cleaned up with existing tooling (or document a manual cleanup path). +- Backfill paths that seed KV identity keys should be evaluated: either keep as explicitly “legacy tooling” or remove if no longer used. diff --git a/openspec/changes/remove-kv-canonical-identity-map/proposal.md b/openspec/changes/remove-kv-canonical-identity-map/proposal.md new file mode 100644 index 000000000..c20b29437 --- /dev/null +++ b/openspec/changes/remove-kv-canonical-identity-map/proposal.md @@ -0,0 +1,25 @@ +# Change: Remove KV canonical identity map publisher and dead code + +## Why +GitHub issue `#2152` proposes fixes to `BuildKeysFromRecord` so alias identity keys can be reconstructed and stale alias keys deleted from the KV-backed canonical identity map. + +Current implementation no longer uses KV as the authoritative device registry/canonicalization store: +- Core explicitly disables the KV identity map **publisher** (to avoid write amplification). +- Canonical identity resolution is performed via CNPG-backed `IdentityEngine` + `DeviceRegistry`, with KV used only as an optional cache/hydration layer for limited lookups (e.g. sweep IP caching). + +As a result, the code path referenced in `#2152` appears to be effectively dead in normal operation, and the proposed fix may be unnecessary. The repository still contains legacy KV canonical-map publishing and KV lookup scaffolding that increases maintenance burden and creates confusion about the current identity architecture. 
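To make the intended end state concrete, a rough sketch of the cache-only role KV retains under this proposal (contrast with the full removal in `remove-identity-kv-cache`): KV is consulted best-effort, and a miss, an error, or an absent client all fall through to the CNPG-backed resolver. The interfaces and key layout are assumptions for illustration.

```go
package identity

import "context"

// kvStore is a minimal stand-in for the datasvc KV client; the real interface differs.
type kvStore interface {
	Get(ctx context.Context, key string) (value []byte, found bool, err error)
}

// cnpgResolver is the authoritative CNPG-backed lookup path.
type cnpgResolver interface {
	CanonicalDeviceID(ctx context.Context, partition, ip string) (string, error)
}

// lookupCanonical treats KV strictly as a cache: correctness never depends on it.
func lookupCanonical(ctx context.Context, kv kvStore, db cnpgResolver, partition, ip string) (string, error) {
	if kv != nil {
		// Best-effort read; any error is ignored and we fall through to CNPG.
		if raw, found, err := kv.Get(ctx, "device_canonical_map/partition-ip/"+partition+":"+ip); err == nil && found {
			return string(raw), nil
		}
	}
	return db.CanonicalDeviceID(ctx, partition, ip)
}
```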
+ +## What Changes +- Remove the unused KV canonical identity map publisher from the registry (`pkg/registry/identity_publisher.go`) and associated tests. +- Remove or simplify identitymap helpers that only exist to support the removed publisher (e.g. record→key reconstruction and alias-key KV support that is no longer written/read). +- Remove dead KV canonical-map lookup scaffolding from sync integrations (Armis/NetBox) where canonical identity resolution is now handled centrally by core. +- Update documentation to reflect the CNPG-authoritative identity flow and the limited KV cache/hydration role (and remove references to the removed publisher behavior). + +## Impact +- Affected specs: `device-identity-reconciliation` +- Affected code: + - `pkg/registry/*` (remove identity publisher wiring and dead types) + - `pkg/identitymap/*` (drop publisher-only helpers) + - `pkg/sync/integrations/{armis,netbox}/*` (remove dead canonical KV lookup scaffolding) + - `docs/docs/architecture.md` (update identity canonicalization narrative) +- Behavior change: none intended for normal operation (publisher is currently disabled); reduces confusion and removes legacy paths. diff --git a/openspec/changes/remove-kv-canonical-identity-map/specs/device-identity-reconciliation/spec.md b/openspec/changes/remove-kv-canonical-identity-map/specs/device-identity-reconciliation/spec.md new file mode 100644 index 000000000..b417b2c32 --- /dev/null +++ b/openspec/changes/remove-kv-canonical-identity-map/specs/device-identity-reconciliation/spec.md @@ -0,0 +1,24 @@ +## ADDED Requirements + +### Requirement: CNPG-Authoritative Identity Canonicalization +The system SHALL treat CNPG (via `IdentityEngine` + `DeviceRegistry`) as the authoritative source of canonical device identity, and SHALL NOT rely on KV as the source of truth for identity reconciliation. + +#### Scenario: Registry processes updates without KV +- **WHEN** the core registry processes a batch of device updates +- **THEN** canonical device IDs are resolved and persisted via CNPG-backed identity reconciliation +- **AND** the registry does not require KV to be available to complete identity reconciliation + +### Requirement: KV Identity Lookups Are Cache-Only +The system MAY use KV as a cache/hydration layer for limited identity lookups (e.g., IP and partition:IP used during sweep processing), but MUST continue to resolve identities correctly when KV is unavailable. + +#### Scenario: KV miss falls back to CNPG +- **WHEN** a canonical identity lookup misses in KV +- **THEN** the system falls back to CNPG-backed lookup paths +- **AND** MAY hydrate the KV cache from the CNPG result + +## REMOVED Requirements + +### Requirement: Registry KV Canonical Identity Publisher +**Reason**: The KV canonical identity map publisher is disabled in normal operation due to write amplification and is not part of the current CNPG-authoritative reconciliation flow. + +**Migration**: Existing KV canonical-map data (if present from older deployments) can be treated as legacy cache data and cleaned up manually using tooling/runbooks. diff --git a/openspec/changes/remove-kv-canonical-identity-map/tasks.md b/openspec/changes/remove-kv-canonical-identity-map/tasks.md new file mode 100644 index 000000000..6adc99b6e --- /dev/null +++ b/openspec/changes/remove-kv-canonical-identity-map/tasks.md @@ -0,0 +1,27 @@ +## 1. 
Confirm Current Runtime Behavior +- [x] 1.1 Trace core startup to confirm the KV identity map publisher is not wired into `DeviceRegistry` +- [x] 1.2 Enumerate remaining KV reads/writes related to identity and classify them (cache-only vs authoritative) +- [x] 1.3 Confirm no Helm/chart or runtime config path re-enables the removed publisher + +## 2. Remove Registry KV Identity Publisher +- [x] 2.1 Delete `pkg/registry/identity_publisher.go` and tests that only cover publisher behavior +- [x] 2.2 Remove `DeviceRegistry.identityPublisher` field and `publishIdentityMap` call sites +- [x] 2.3 Ensure registry construction no longer exposes `WithIdentityPublisher` + +## 3. Trim Identity Map Helpers +- [x] 3.1 Remove `BuildKeysFromRecord` if no longer referenced after publisher removal +- [x] 3.2 Remove alias-related KV identity key derivation if it is no longer written/read anywhere +- [x] 3.3 Ensure remaining identitymap utilities cover only the supported identity key kinds used by core (lookups/hydration/backfill) + +## 4. Remove Dead Canonical KV Lookup Code in Sync Integrations +- [x] 4.1 Remove `prefetchCanonicalEntries` no-op stubs and any unreachable “direct KV lookup” fallback branches +- [x] 4.2 Remove unused key-ordering/canonical-record resolution helpers if no longer needed +- [x] 4.3 Keep KV usage that is still required for sync workflows (e.g., sweep config writes) intact + +## 5. Update Documentation +- [x] 5.1 Update `docs/docs/architecture.md` to reflect CNPG-authoritative canonicalization and clarify KV’s cache-only identity role +- [x] 5.2 Remove stale rollout/metrics guidance that assumes the registry publishes canonical map keys to KV + +## 6. Validation +- [x] 6.1 Run `go test ./...` (or the closest repo-standard subset) and fix only failures caused by this change +- [x] 6.2 Run `openspec validate remove-kv-canonical-identity-map --strict` diff --git a/openspec/changes/remove-registry-proton-shim/proposal.md b/openspec/changes/remove-registry-proton-shim/proposal.md deleted file mode 100644 index d014257ff..000000000 --- a/openspec/changes/remove-registry-proton-shim/proposal.md +++ /dev/null @@ -1,17 +0,0 @@ -# Change: Remove Proton-style registry compatibility shim - -## Why -- The Proton migration spec promised that registry queries would run directly on CNPG/pgx, yet `pkg/registry` still routes every query and insert through `db.Conn`, a compatibility layer that rewrites `?` placeholders and emulates the Proton batch API (`pkg/db/db.go`). -- This shim is the last code path that references "Proton" inside `pkg/db` (`pkg/db/interfaces.go:34`), keeps the `Service` interface tied to legacy semantics, and makes it harder to reason about prepared statements or benefit from pgx features. -- Because the compatibility layer lives in `pkg/db`, any future schema work must understand both the typed CNPG helpers *and* the shimmed query flow, increasing maintenance risk. - -## What Changes -- Remove `DB.Conn`/`CompatConn` from `pkg/db` and migrate every `pkg/registry` call site to typed helpers (`ExecCNPG`, `QueryCNPGRows`, or new helper methods dedicated to registry queries/events). -- Replace the `PrepareBatch` based insert for `service_registration_events` with a pgx `Batch` helper that works natively with `$n` placeholders and typed metadata marshaling. -- Update the `Service` interface/docs so it clearly describes CNPG responsibilities instead of Proton streams. 
-- Expand registry-focused unit tests/mocks so they cover the new helpers and prove no code depends on Proton-style placeholder rewriting. - -## Impact -- Touches `pkg/db` and `pkg/registry`, so we must rerun the Bazel Go tests covering both packages. -- Slight refactors for callers that expected `db.Conn` to exist; mock generation may need updates after the interface changes. -- No user-facing behavior change is expected, but the cleanup removes the last Proton references inside the Go data layer and makes future schema or registry work easier. diff --git a/openspec/changes/remove-registry-proton-shim/specs/timeseries-storage/spec.md b/openspec/changes/remove-registry-proton-shim/specs/timeseries-storage/spec.md deleted file mode 100644 index bbf6af128..000000000 --- a/openspec/changes/remove-registry-proton-shim/specs/timeseries-storage/spec.md +++ /dev/null @@ -1,13 +0,0 @@ -## MODIFIED Requirements -### Requirement: Postgres-native unified device and registry tables -ServiceRadar MUST maintain canonical device inventory, poller/agent/checker registries, and the associated materialized state in ordinary Postgres tables with row-level version metadata instead of relying on Proton’s immutable streams. - -#### Scenario: Registry queries keep working -- **GIVEN** a CLI or API request hits `pkg/registry.ServiceRegistry.GetPoller`/`ListPollers` -- **WHEN** the request executes -- **THEN** it reads from the Postgres tables (`pollers`, `agents`, `checkers`, `service_status` history) and returns the same shape/results that the Proton-backed queries produced. - -#### Scenario: Registry uses native pgx helpers -- **GIVEN** any registry read/write (`DeleteService`, `PurgeInactive`, registration events, etc.) -- **WHEN** it touches the database -- **THEN** the code calls the shared CNPG helpers (or pgx APIs) directly with `$n` placeholders—no Proton-style `?` rewriting, compatibility shims, or `PrepareBatch` emulation remain in `pkg/db`. diff --git a/openspec/changes/remove-registry-proton-shim/tasks.md b/openspec/changes/remove-registry-proton-shim/tasks.md deleted file mode 100644 index 44887e3e6..000000000 --- a/openspec/changes/remove-registry-proton-shim/tasks.md +++ /dev/null @@ -1,9 +0,0 @@ -## 1. Registry compatibility removal -- [x] 1.1 Inventory `pkg/registry` usages of `db.Conn` (Query/QueryRow/Exec/PrepareBatch) and outline the SQL that needs `$n` placeholder rewrites. -- [x] 1.2 Introduce CNPG helpers in `pkg/db` (e.g., `InsertServiceRegistrationEvent`, `QueryRegistryRows`) so registry callers can share batching/error handling without rolling their own `pgx` plumbing. -- [x] 1.3 Update every registry query/write to call the new helpers (or `ExecCNPG`/`QueryCNPGRows`) with `$n` placeholders, ensuring tests cover the new code paths. -- [x] 1.4 Delete `DB.Conn`, `CompatConn`, and the associated shim errors; regenerate mocks so the `Service` interface no longer exposes Proton-era methods. - -## 2. Documentation/tests -- [x] 2.1 Refresh `pkg/db/interfaces.go` comments (and any developer docs) to describe the CNPG responsibilities instead of Timeplus Proton. -- [x] 2.2 Extend registry unit tests (and integration tests if needed) to validate the pgx-based implementations, including service event inserts and purge/delete flows. 
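To make the intended replacement concrete, a rough sketch of a native pgx batch insert for `service_registration_events`: `$n` placeholders throughout, metadata marshaled to JSONB up front, and no placeholder-rewriting shim. The column names and event struct are assumptions for illustration, not the exact registry schema.

```go
package registry

import (
	"context"
	"encoding/json"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

// RegistrationEvent is an illustrative shape for a registry audit event.
type RegistrationEvent struct {
	ServiceName string
	PollerID    string
	EventType   string
	Timestamp   time.Time
	Metadata    map[string]string
}

// InsertRegistrationEvents queues one parameterized INSERT per event and sends
// them as a single pgx batch.
func InsertRegistrationEvents(ctx context.Context, pool *pgxpool.Pool, events []RegistrationEvent) error {
	const q = `
		INSERT INTO service_registration_events (service_name, poller_id, event_type, timestamp, metadata)
		VALUES ($1, $2, $3, $4, $5)`

	batch := &pgx.Batch{}
	for _, ev := range events {
		meta, err := json.Marshal(ev.Metadata) // typed metadata -> JSONB
		if err != nil {
			return err
		}
		batch.Queue(q, ev.ServiceName, ev.PollerID, ev.EventType, ev.Timestamp, meta)
	}

	// Close executes any remaining queued statements and surfaces the first error.
	return pool.SendBatch(ctx, batch).Close()
}
```

SendBatch pipelines every queued insert over one connection round trip, which is roughly what the Proton `PrepareBatch` emulation was approximating.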
diff --git a/openspec/changes/rename-sysmon-vm-to-sysmon-osx/proposal.md b/openspec/changes/rename-sysmon-vm-to-sysmon-osx/proposal.md deleted file mode 100644 index 16e96e331..000000000 --- a/openspec/changes/rename-sysmon-vm-to-sysmon-osx/proposal.md +++ /dev/null @@ -1,155 +0,0 @@ -# Change: Rename sysmon-vm package to sysmon-osx - -## Status: PROPOSED - -## Why -The `sysmon-vm` package name is misleading. The component was originally developed for testing in a Linux VM environment, but it is actually a macOS/darwin system monitor checker that collects CPU, memory, and process metrics from Apple Silicon hosts. The "vm" suffix suggests virtual machine monitoring, which is not the component's purpose. - -Renaming to `sysmon-osx` clarifies: -1. The target platform (macOS/darwin) -2. The component's role as an OS-level system monitor -3. Distinction from other sysmon checkers that may exist for different platforms - -## What Changes - -### 1. Package & Directory Renames -| Old Path | New Path | -|----------|----------| -| `cmd/checkers/sysmon-vm/` | `cmd/checkers/sysmon-osx/` | -| `pkg/checker/sysmonvm/` | `pkg/checker/sysmonosx/` | -| `scripts/sysmonvm/` | `scripts/sysmonosx/` | -| `tools/sysmonvm/` | `tools/sysmonosx/` | -| `packaging/sysmonvm_host/` | `packaging/sysmonosx_host/` | - -### 2. Binary & Service Names -| Old Name | New Name | -|----------|----------| -| `serviceradar-sysmon-vm` | `serviceradar-sysmon-osx` | -| `com.serviceradar.sysmonvm` (LaunchDaemon) | `com.serviceradar.sysmonosx` | -| `serviceradar-sysmon-vm.service` (systemd) | `serviceradar-sysmon-osx.service` | - -### 3. Configuration Files -| Old Name | New Name | -|----------|----------| -| `sysmon-vm.json` | `sysmon-osx.json` | -| `sysmon-vm.json.example` | `sysmon-osx.json.example` | -| `sysmon-vm.checker.json` | `sysmon-osx.checker.json` | - -### 4. Log Paths (macOS) -| Old Path | New Path | -|----------|----------| -| `/var/log/serviceradar/sysmon-vm.log` | `/var/log/serviceradar/sysmon-osx.log` | -| `/var/log/serviceradar/sysmon-vm.err.log` | `/var/log/serviceradar/sysmon-osx.err.log` | - -### 5. Service Registry & Config Registry -- Update `pkg/config/registry.go`: `"sysmon-vm-checker"` → `"sysmon-osx-checker"` -- Update `pkg/agent/registry.go`: `"sysmon-vm"` → `"sysmon-osx"` -- Update KV key: `config/sysmon-vm-checker.json` → `config/sysmon-osx-checker.json` - -### 6. Makefile Targets -Rename all `sysmonvm-*` targets to `sysmonosx-*`: -- `sysmonosx-host-setup` -- `sysmonosx-build-checker-darwin` -- `sysmonosx-host-install` -- `sysmonosx-host-package` -- (Remove VM-related targets that are no longer applicable) - -### 7. Web UI Components -| Old Name | New Name | -|----------|----------| -| `SysmonVmDetails.tsx` | `SysmonOsxDetails.tsx` | -| `sysmon.ts` (types) | Update internal type names if they reference "vm" | - -### 8. Docker Compose -- Update service name in compose files -- Update environment variable: `SYSMON_VM_ADDRESS` → `SYSMON_OSX_ADDRESS` - -### 9. Documentation -- `cmd/checkers/sysmon-vm/README.md` → `cmd/checkers/sysmon-osx/README.md` -- Update all references in runbooks (`sysmonvm-e2e.md`, `compose-mtls-sysmonvm.md`) -- Update CHANGELOG references - -### 10. TLS Demo Certificates -Rename demo certificate files: -- `tls/demo/sysmon*` → appropriate new naming - -### 11. CI/CD -- Update `.github/workflows/clang-tidy.yml` path triggers -- Update any other workflow files referencing sysmon-vm paths - -## Migration Path - -### Backward Compatibility (Optional) -For existing deployments, consider: -1. 
Adding config migration logic to detect old paths and log deprecation warnings -2. Symlinks from old binary/config paths to new ones (for one release cycle) -3. KV store migration script to update `sysmon-vm-checker` → `sysmon-osx-checker` keys - -### Breaking Change Approach (Recommended) -Given that sysmon-vm is a relatively new component with limited deployment: -1. Clean rename without backward compatibility shims -2. Document upgrade path in release notes -3. Require users to: - - Reinstall the package (new paths) - - Update poller configurations - - Re-enroll if using edge onboarding - -## Files to Modify - -### Go Source -| File | Change | -|------|--------| -| `pkg/config/registry.go` | Update service type registration | -| `pkg/agent/registry.go` | Update case statement | -| `pkg/checker/sysmonvm/*.go` → `pkg/checker/sysmonosx/*.go` | Rename package, update imports | -| `cmd/checkers/sysmon-vm/main.go` → `cmd/checkers/sysmon-osx/main.go` | Update paths, service name | - -### Build & Package -| File | Change | -|------|--------| -| `cmd/checkers/sysmon-vm/BUILD.bazel` | Move and update | -| `packaging/sysmonvm_host/BUILD.bazel` | Move and update package rules | -| `Makefile` | Rename targets | -| `scripts/sysmonvm/*.sh` | Move and update | - -### Configuration -| File | Change | -|------|--------| -| `docker/compose/sysmon-vm.checker.json` | Rename and update | -| `docker/compose/poller.docker.json` | Update service reference | -| `cmd/poller/config.json` | Update service reference | -| `packaging/poller/config/poller.json` | Update service reference | - -### Service Definitions -| File | Change | -|------|--------| -| `cmd/checkers/sysmon-vm/hostmac/com.serviceradar.sysmonvm.plist` | Rename and update | -| `tools/sysmonvm/serviceradar-sysmon-vm.service` | Rename and update | - -### Web UI -| File | Change | -|------|--------| -| `web/src/components/Service/SysmonVmDetails.tsx` | Rename | -| `web/src/components/Service/Dashboard.tsx` | Update imports | -| Related component files | Update imports/references | - -### Documentation -| File | Change | -|------|--------| -| `cmd/checkers/sysmon-vm/README.md` | Move and update | -| `docs/docs/runbooks/sysmonvm-e2e.md` | Rename and update | -| `docs/docs/runbooks/compose-mtls-sysmonvm.md` | Rename and update | -| `CHANGELOG.md` | Note rename in next release | - -## Impact -- Affected specs: sysmon-telemetry (update references) -- Affected code: All sysmon-vm related packages, configs, and scripts -- Existing deployments: Will require reinstallation with new package - -## Verification -1. Build sysmon-osx binary: `make sysmonosx-build-checker-darwin` -2. Package installer: `make sysmonosx-host-package` -3. Install and verify LaunchDaemon starts correctly -4. Verify metrics flow: sysmon-osx → poller → core → UI -5. Run E2E test with renamed components -6. Verify no residual "sysmon-vm" references in codebase (grep check) diff --git a/openspec/changes/rename-sysmon-vm-to-sysmon-osx/tasks.md b/openspec/changes/rename-sysmon-vm-to-sysmon-osx/tasks.md deleted file mode 100644 index 816bf8fdc..000000000 --- a/openspec/changes/rename-sysmon-vm-to-sysmon-osx/tasks.md +++ /dev/null @@ -1,92 +0,0 @@ -## 1. Preparation -- [x] 1.1 Create tracking issue for the rename work -- [x] 1.2 Ensure all sysmon-vm related PRs are merged or closed before starting - -## 2. 
Go Package Renames -- [x] 2.1 Rename `pkg/checker/sysmonvm/` → `pkg/checker/sysmonosx/` (update package declaration) -- [x] 2.2 Update all import paths referencing `pkg/checker/sysmonvm` -- [x] 2.3 Rename `cmd/checkers/sysmon-vm/` → `cmd/checkers/sysmon-osx/` -- [x] 2.4 Update `pkg/config/registry.go`: change `"sysmon-vm-checker"` to `"sysmon-osx-checker"` -- [x] 2.5 Update `pkg/agent/registry.go`: change `"sysmon-vm"` case to `"sysmon-osx"` -- [x] 2.6 Update any other Go files with sysmon-vm references - -## 3. Build System -- [x] 3.1 Move and update `cmd/checkers/sysmon-vm/BUILD.bazel` → `cmd/checkers/sysmon-osx/BUILD.bazel` -- [x] 3.2 Move and update `packaging/sysmonvm_host/BUILD.bazel` → `packaging/sysmonosx_host/BUILD.bazel` -- [x] 3.3 Update Makefile: rename all `sysmonvm-*` targets to `sysmonosx-*` -- [x] 3.4 Remove obsolete VM-related Makefile targets (vm-create, vm-start, vm-ssh, etc.) -- [ ] 3.5 Verify `bazel build` and `bazel test` pass - -## 4. Scripts -- [x] 4.1 Move `scripts/sysmonvm/` → `scripts/sysmonosx/` -- [x] 4.2 Update script names (remove vm-* prefix, keep host-* and build-*) -- [x] 4.3 Update internal script references to new paths -- [x] 4.4 Remove scripts that are only for VM management (vm-create.sh, vm-start.sh, etc.) - -## 5. Configuration Files -- [x] 5.1 Rename `docker/compose/sysmon-vm.checker.json` → `docker/compose/sysmon-osx.checker.json` -- [x] 5.2 Update service name inside checker config from `"sysmon-vm"` to `"sysmon-osx"` -- [x] 5.3 Update `docker/compose/poller.docker.json` service references -- [x] 5.4 Update `cmd/poller/config.json` service references -- [x] 5.5 Update `packaging/poller/config/poller.json` service references -- [x] 5.6 Rename example config: `sysmon-vm.json.example` → `sysmon-osx.json.example` - -## 6. Service Definitions -- [x] 6.1 Rename LaunchDaemon plist: `com.serviceradar.sysmonvm.plist` → `com.serviceradar.sysmonosx.plist` -- [x] 6.2 Update plist contents (Label, Program path, log paths) -- [x] 6.3 Rename/update systemd service file (if keeping Linux support) -- [x] 6.4 Update default config/binary paths in plist and service files - -## 7. Tools Directory -- [x] 7.1 Move `tools/sysmonvm/` → `tools/sysmonosx/` -- [x] 7.2 Update `config.example.yaml` if still applicable -- [x] 7.3 Remove VM-specific tooling that is no longer needed - -## 8. Web UI -- [x] 8.1 Rename `web/src/components/Service/SysmonVmDetails.tsx` → `SysmonOsxDetails.tsx` -- [x] 8.2 Update component exports and imports in `Dashboard.tsx` -- [x] 8.3 Update any type definitions in `web/src/types/sysmon.ts` if they reference "vm" -- [x] 8.4 Update `WatcherTelemetryPanel.tsx` references -- [x] 8.5 Update `metric-components.jsx` references -- [x] 8.6 Update edge-packages admin page references -- [ ] 8.7 Run `npm run build` to verify no TypeScript errors - -## 9. Docker Compose -- [x] 9.1 Update environment variable `SYSMON_VM_ADDRESS` → `SYSMON_OSX_ADDRESS` -- [x] 9.2 Update service names in compose files -- [x] 9.3 Update `poller-stack.compose.yml` if applicable - -## 10. Documentation -- [x] 10.1 Move `cmd/checkers/sysmon-vm/README.md` → `cmd/checkers/sysmon-osx/README.md` -- [x] 10.2 Update README content (all path/name references) -- [x] 10.3 Rename `docs/docs/runbooks/sysmonvm-e2e.md` → `sysmonosx-e2e.md` -- [x] 10.4 Rename `docs/docs/runbooks/compose-mtls-sysmonvm.md` → `compose-mtls-sysmonosx.md` -- [x] 10.5 Update runbook contents -- [ ] 10.6 Add migration notes to CHANGELOG.md - -## 11. 
TLS Demo Assets -- [x] 11.1 Rename `tls/demo/sysmon*` files to use new naming -- [x] 11.2 Update any scripts that reference these files - -## 12. CI/CD -- [x] 12.1 Update `.github/workflows/clang-tidy.yml` path triggers -- [x] 12.2 Check for any other workflow files with sysmon-vm references -- [ ] 12.3 Verify CI passes with renamed paths - -## 13. OpenSpec Updates -- [x] 13.1 Archive old sysmon-vm fix proposals (already implemented - kept as historical reference) -- [x] 13.2 Update `fix-sysmon-vm-metrics-availability/` references if needed (kept as historical reference) -- [x] 13.3 Update `fix-sysmon-vm-macos-service-startup/` references if needed (kept as historical reference) - -## 14. Final Verification -- [x] 14.1 Run grep to find any missed references (remaining references are historical/test data) -- [ ] 14.2 Build all targets: `make sysmonosx-build-checker-darwin` -- [ ] 14.3 Package installer: `make sysmonosx-host-package` -- [ ] 14.4 Install on macOS test host and verify LaunchDaemon starts -- [ ] 14.5 Verify metrics flow end-to-end: sysmon-osx → poller → core → UI -- [ ] 14.6 Run full test suite - -## 15. Release -- [ ] 15.1 Document upgrade path in release notes -- [ ] 15.2 Create migration guide for existing users -- [ ] 15.3 Tag release with rename diff --git a/openspec/changes/replace-srql-dsl/proposal.md b/openspec/changes/replace-srql-dsl/proposal.md deleted file mode 100644 index f8f86f3be..000000000 --- a/openspec/changes/replace-srql-dsl/proposal.md +++ /dev/null @@ -1,14 +0,0 @@ -## Why -- The OCaml SRQL translator only speaks the legacy DSL that was tuned for Proton/ClickHouse semantics, and the Proton stacks are already being decommissioned. -- Maintaining OCaml/dune infrastructure for a single binary makes it hard to attract maintainers, integrate with the rest of the Rust-focused telemetry code, or reuse shared tooling. -- Query traffic is shifting to CNPG, and we currently have no documented or supported way to execute ServiceRadar queries against the CNPG device/timeseries schemas. - -## What Changes -1. Design and implement a Rust-based SRQL translator/service that speaks the `/api/query` contract, uses Diesel.rs for query construction, and targets CNPG through pooled connections that respect SPIFFE/Kong authentication requirements. -2. Define the new SRQL DSL syntax/semantics so it maps onto CNPG schemas (devices, signals, aggregated metrics) and provides backward-compatible operators for the dashboards that today rely on the OCaml service. -3. Outline the migration story: dual-running strategy, toggles, telemetry/alerting so we can flip Kong/Core consumers over without downtime, plus documentation for running the Rust service locally and in demo/prod clusters. - -## Impact -- New Rust crate/binary plus Docker/Bazel targets, Diesel dependency management (Go toolchain unaffected). -- Updates to docs (`architecture.md`, SRQL runbooks) and potentially Next.js/Core configs to point at the new service. -- Operational readiness work (metrics, logs, config) to support rollout plus removal plan for the OCaml code once the Rust DSL is fully vetted. 
diff --git a/openspec/changes/replace-srql-dsl/specs/srql/spec.md b/openspec/changes/replace-srql-dsl/specs/srql/spec.md deleted file mode 100644 index c9aa35fd7..000000000 --- a/openspec/changes/replace-srql-dsl/specs/srql/spec.md +++ /dev/null @@ -1,40 +0,0 @@ -## ADDED Requirements - -### Requirement: Rust SRQL service executes CNPG queries via Diesel -ServiceRadar MUST expose a Rust-based SRQL service that accepts the existing `/api/query` contract, uses Diesel.rs to translate DSL ASTs into SQL, and executes them against the CNPG clusters that now store device and telemetry data. - -#### Scenario: Diesel-backed translator hits CNPG -- **GIVEN** the SRQL service running from `rust/bin/srql` (or the matching Docker image) -- **WHEN** it receives an authenticated `/api/query` request that filters devices by `site_id` and aggregates packet loss -- **THEN** the service converts the DSL into Diesel query builders that target the CNPG pool configured via `DATABASE_URL`, executes the resulting SQL, and returns JSON rows without delegating to Proton/OCaml components. - -#### Scenario: Connection management respects SPIFFE/Kong policies -- **GIVEN** Kong forwards a client request with mutual TLS headers and SPIFFE identities issued by SPIRE -- **WHEN** the Rust SRQL service opens or reuses a CNPG connection -- **THEN** it uses the configured SPIFFE-aware credentials/cert bundles, logs connection failures, and enforces the existing request auth checks before issuing SQL. - -### Requirement: SRQL DSL maps to CNPG schemas with backward compatibility -The new DSL MUST document and implement operators for selecting inventory, aggregations, and time-bucket queries against CNPG schemas while preserving behavior expected by dashboards that were built on top of the OCaml SRQL layer. - -#### Scenario: Canonical dashboards run unchanged -- **GIVEN** a set of stored SRQL queries used by device search, alert pages, and demo dashboards -- **WHEN** the same SRQL statements are executed against the Rust service in CNPG mode -- **THEN** they succeed without syntax changes, leverage Diesel to emit SQL against `devices`, `telemetry_samples`, and other CNPG tables, and return the same columns (device id, timestamp, metric buckets) the UI expects. - -#### Scenario: Unsupported Proton constructs fail fast -- **GIVEN** a user submits a query that references Proton-only stream operators (e.g., `STREAM WINDOW` or `FORMAT JSONEachRow`) -- **WHEN** the Rust SRQL parser encounters those constructs -- **THEN** it returns a descriptive error that the DSL is CNPG-backed and does not attempt to issue a Proton request. - -### Requirement: Rust SRQL service is the sole `/api/query` backend -The OCaml translator MUST be fully removed once the Rust implementation lands so every environment routes `/api/query` traffic exclusively to the CNPG-backed Rust service without any dual-run or Proton bridge modes. - -#### Scenario: Kong routes only to the Rust translator -- **GIVEN** the demo or prod Kong gateway forwarding `/api/query` calls -- **WHEN** clients submit SRQL statements -- **THEN** the request terminates on the `rust/srql` deployment (or its Docker Compose equivalent), there is no live OCaml SRQL pod to consult, and the response is produced solely by the CNPG-backed Diesel planner. 
- -#### Scenario: Legacy dual-run toggles are gone -- **GIVEN** operators rolling out new SRQL code or adjusting configs -- **WHEN** they inspect environment variables, Helm values, or Docker Compose overrides -- **THEN** there are no `SRQL_DUAL_*` flags or Proton passthrough settings to enable the OCaml translator; the only configurable backend is the Rust service’s CNPG connection. diff --git a/openspec/changes/replace-srql-dsl/tasks.md b/openspec/changes/replace-srql-dsl/tasks.md deleted file mode 100644 index 0310f8bbf..000000000 --- a/openspec/changes/replace-srql-dsl/tasks.md +++ /dev/null @@ -1,14 +0,0 @@ -## 1. Rust SRQL translator -- [x] 1.1 Finalize crate layout (workspace entry, Bazel targets, Docker image) and add Diesel plus CNPG connection pooling dependencies. -- [x] 1.2 Port the SRQL parser/planner into Rust, exposing modules for parsing the DSL AST and translating it into Diesel query builders against CNPG schemas. -- [x] 1.3 Implement the `/api/query` HTTP surface (mTLS + Kong-authenticated) that executes translated statements via Diesel, streams rows, and exposes metrics/logging. - -## 2. DSL compatibility and migration -- [x] 2.1 Define the updated SRQL syntax/semantics doc and add fixtures verifying parity for the dashboards/alerts that currently call the OCaml service. *(Docs now describe the Rust-powered CNPG/Timescale flow and enumerate the field mapping + query conventions.)* -- [x] 2.2 Rip out the dual-run plumbing (configs, headers, env vars) so `/api/query` always targets the Rust translator and there is no path to re-enable the OCaml service. *(Rust server no longer instantiates the dual runner and the web/client configs only know about the CNPG backend.)* -- [x] 2.3 Remove or quarantine Proton-specific behaviors so implementations fail fast when unsupported operators are used. *(New Diesel executor rejects Proton-only fields and the UI now consumes canonical timestamp columns.)* - -## 3. Operational integration -- [x] 3.1 Update architecture/docs/runbooks to describe the Rust service, CNPG connectivity, and local dev instructions. *(Architecture PRD now covers the CNPG + Timescale + AGE plan and SRQL docs reference the Rust implementation.)* -- [x] 3.2 Produce deployment manifests (Docker Compose + k8s demo) and CI tasks so the Rust SRQL binary builds, tests, and publishes images alongside other services. *(Compose now launches the Rust binary, Bazel builds the `rust/srql` image, and demo/prod overlays include the new Deployment wired to SHA-tagged pushes.)* -- [x] 3.3 Schedule and execute the cut-over plan (demo first, then prod), including removing the OCaml deployment, after the new service clears validation. *(Demo-staging is now SRQL-only, Proton workloads were removed, and the Kong/Web routing plus documentation have been updated to reflect the finalized cut-over.)* diff --git a/openspec/changes/restore-soft-deleted-devices/proposal.md b/openspec/changes/restore-soft-deleted-devices/proposal.md deleted file mode 100644 index 971498317..000000000 --- a/openspec/changes/restore-soft-deleted-devices/proposal.md +++ /dev/null @@ -1,14 +0,0 @@ -# Change: Restore Soft-Deleted Devices on Fresh Sightings - -## Why -After the latest deploy, the demo inventory dropped from ~50k faker devices to ~42k, and CNPG shows ~50k devices marked `_deleted=true`. Once a device is tombstoned, new sightings no longer clear `_deleted`, so devices never reanimate during DHCP churn. 
- -## What Changes -- Allow non-deletion device updates to clear `_deleted`/`deleted` flags so re-sighted devices return to the active inventory while still honoring explicit deletion updates. -- Add regression coverage to prevent future drops in faker/demo when devices churn IPs. -- Verify demo counts recover to 50k and remain stable across churn cycles. - -## Impact -- Affected specs: `service-device-capabilities` -- Affected code: `pkg/db/cnpg_unified_devices.go` (metadata merge), CNPG upsert regression tests, faker/demo verification scripts or docs. -- Open issue: Registry inventory is under-counting (~45–48k vs 50k in CNPG/SRQL); registry rehydration/consistency needs follow-up. diff --git a/openspec/changes/restore-soft-deleted-devices/specs/service-device-capabilities/spec.md b/openspec/changes/restore-soft-deleted-devices/specs/service-device-capabilities/spec.md deleted file mode 100644 index b98508aab..000000000 --- a/openspec/changes/restore-soft-deleted-devices/specs/service-device-capabilities/spec.md +++ /dev/null @@ -1,11 +0,0 @@ -## ADDED Requirements -### Requirement: Soft-deleted devices reactivate on new sightings -When a device marked as deleted is observed again with a non-deletion update, the system SHALL clear deletion flags so the device returns to the active inventory. - -#### Scenario: Re-sighted device clears deletion flag -- **WHEN** an incoming device update for `sr:device-123` does not set `_deleted` or `deleted` -- **THEN** the upsert removes any prior `_deleted`/`deleted` flags and the device appears in inventory queries. - -#### Scenario: Explicit deletion remains honored -- **WHEN** an incoming update includes `_deleted=true` -- **THEN** the device remains excluded from active inventory and continues to satisfy unique-IP constraints for soft deletions. diff --git a/openspec/changes/restore-soft-deleted-devices/tasks.md b/openspec/changes/restore-soft-deleted-devices/tasks.md deleted file mode 100644 index 42966413e..000000000 --- a/openspec/changes/restore-soft-deleted-devices/tasks.md +++ /dev/null @@ -1,10 +0,0 @@ -## 1. Implementation -- [x] 1.1 Update CNPG device upsert to clear `_deleted`/`deleted` when processing non-deletion updates so churned devices reanimate (`pkg/db/cnpg_unified_devices.go`). -- [x] 1.2 Add regression test asserting that a non-deletion update removes `_deleted` while explicit deletion updates keep it set (`pkg/db/cnpg_unified_devices_test.go`). -- [x] 1.3 Validate demo CNPG counts return to ~50k active devices after deploy (non-deleted count matches faker total). - -## 2. Verification -- [ ] 2.1 `openspec validate restore-soft-deleted-devices --strict` -- [x] 2.2 `go test` for updated DB upsert logic -- [x] 2.3 Manual DB query in demo: `select count(*) from unified_devices where coalesce(lower(metadata->>'_deleted'),'false') <> 'true';` (observed 50,003) -- [ ] 2.4 Registry consistency check: registry-backed inventory should match CNPG counts; currently ~45–48k via registry versus 50k in CNPG/SRQL (investigation pending). 
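The restore-soft-deleted-devices change above hinges on one merge rule: an update that explicitly sets a deletion flag keeps the tombstone, while any other sighting clears it so the device re-enters inventory. A minimal Go sketch of that rule, using hypothetical map shapes rather than the actual upsert code in `pkg/db/cnpg_unified_devices.go`:

```go
package main

import "fmt"

// reconcileDeletionFlags applies the reanimation rule from the spec above: an
// incoming update that explicitly sets _deleted/deleted keeps the tombstone,
// while any other sighting clears prior deletion flags. Shapes and names are
// illustrative, not the real CNPG upsert implementation.
func reconcileDeletionFlags(existing, incoming map[string]string) map[string]string {
	merged := make(map[string]string, len(existing)+len(incoming))
	for k, v := range existing {
		merged[k] = v
	}
	for k, v := range incoming {
		merged[k] = v
	}

	_, explicitLegacy := incoming["deleted"]
	_, explicitSoft := incoming["_deleted"]
	if !explicitLegacy && !explicitSoft {
		// Non-deletion sighting: drop any stale tombstone markers.
		delete(merged, "deleted")
		delete(merged, "_deleted")
	}

	return merged
}

func main() {
	existing := map[string]string{"_deleted": "true", "hostname": "printer-01"}

	resighted := map[string]string{"ip": "10.0.0.42"}
	fmt.Println(reconcileDeletionFlags(existing, resighted)) // _deleted cleared

	tombstone := map[string]string{"_deleted": "true"}
	fmt.Println(reconcileDeletionFlags(existing, tombstone)) // tombstone preserved
}
```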
diff --git a/openspec/changes/stabilize-age-graph-ingestion/proposal.md b/openspec/changes/stabilize-age-graph-ingestion/proposal.md deleted file mode 100644 index c8a72cfa9..000000000 --- a/openspec/changes/stabilize-age-graph-ingestion/proposal.md +++ /dev/null @@ -1,13 +0,0 @@ -# Change: Stabilize AGE graph ingestion under contention - -## Why -Core in the demo namespace is emitting AGE write failures (`Entity failed to be updated: 3` / SQLSTATE XX000) and statement timeouts while registering pollers, which maps to Postgres `TM_Updated` lock conflicts in AGE’s `cypher_set` path. The registry/topology writers and the age-backfill job all fire MERGE batches in parallel with no retry/backpressure, so overlapping writes lose batches and block until statement_timeout. - -## What Changes -- Funnel AGE graph writes through a bounded worker/queue with chunking so MERGE batches do not run concurrently against the same graph and stay under CNPG statement timeouts. -- Add targeted retry/backoff for transient AGE errors (TM_Updated/XX000 and statement_timeout) plus metrics/logging for queued/failed batches so operators can spot contention. -- Coordinate age-backfill with live ingestion (shared queue or mutex/flag) and document the demo runbook so rebuilds do not clobber live writes. - -## Impact -- Affected specs: device-relationship-graph -- Affected code: pkg/registry/age_graph_writer.go, pkg/core/discovery.go, cmd/tools/age-backfill, docs/docs/runbooks/age-graph-readiness.md, CNPG/AGE config (statement_timeout, worker tuning) diff --git a/openspec/changes/stabilize-age-graph-ingestion/specs/device-relationship-graph/spec.md b/openspec/changes/stabilize-age-graph-ingestion/specs/device-relationship-graph/spec.md deleted file mode 100644 index 58ce4b24e..000000000 --- a/openspec/changes/stabilize-age-graph-ingestion/specs/device-relationship-graph/spec.md +++ /dev/null @@ -1,18 +0,0 @@ -## ADDED Requirements -### Requirement: AGE graph writes tolerate contention with retries and backpressure -The system SHALL process AGE graph merges through a backpressure-aware writer that retries transient AGE errors (e.g., SQLSTATE XX000 “Entity failed to be updated”, SQLSTATE 57014 statement timeout) so overlapping registry/backfill writes do not drop batches. - -#### Scenario: Concurrent merges do not lose updates -- **WHEN** registry ingestion and a graph rebuild both issue overlapping MERGE batches -- **THEN** the writer queues the work, retries conflicts with bounded backoff, and the batch eventually commits without emitting `Entity failed to be updated` warnings. - -#### Scenario: Queue prevents overloading AGE -- **WHEN** the AGE write rate exceeds what CNPG can service -- **THEN** the writer applies bounded queueing/backpressure, exports queue-depth metrics, and avoids timing out statements while keeping ingestion lossless. - -### Requirement: AGE backfill coexists with live ingestion -The system SHALL allow the age-backfill utility to run alongside live core graph writes without causing XX000 or statement timeout errors. - -#### Scenario: Backfill during steady-state ingestion -- **WHEN** age-backfill runs in the demo namespace while pollers and agents continue publishing updates -- **THEN** graph merges succeed via the coordinated writer path, and core logs do not emit `Entity failed to be updated` or `statement timeout` warnings. 
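To make the queueing and retry requirements above concrete, the sketch below serializes MERGE batches through a bounded channel, fails fast when the queue is full, and retries errors a caller-supplied predicate deems transient (the XX000/57014 cases named in the spec). All type and function names are placeholders, not the real `age_graph_writer` API:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// graphBatch stands in for one MERGE batch; the real writer carries Cypher
// statements plus batch metadata.
type graphBatch struct {
	cypher string
}

type ageWriter struct {
	queue chan graphBatch
	exec  func(context.Context, graphBatch) error // placeholder for issuing the MERGE against AGE
}

var errQueueFull = errors.New("age writer queue full")

// Enqueue applies backpressure: callers get a fast error instead of blocking
// ingest when the bounded queue is saturated.
func (w *ageWriter) Enqueue(b graphBatch) error {
	select {
	case w.queue <- b:
		return nil
	default:
		return errQueueFull
	}
}

// run drains the queue serially so MERGE batches never overlap, retrying
// failures the caller marks transient with capped, jittered backoff.
func (w *ageWriter) run(ctx context.Context, isTransient func(error) bool) {
	const maxAttempts = 5
	for batch := range w.queue {
		backoff := 200 * time.Millisecond
		for attempt := 1; attempt <= maxAttempts; attempt++ {
			err := w.exec(ctx, batch)
			if err == nil {
				break
			}
			if !isTransient(err) || attempt == maxAttempts {
				fmt.Printf("batch dropped after %d attempts: %v\n", attempt, err)
				break
			}
			time.Sleep(backoff + time.Duration(rand.Int63n(int64(backoff/2))))
			backoff *= 2
		}
	}
}

func main() {
	w := &ageWriter{
		queue: make(chan graphBatch, 64),
		exec:  func(context.Context, graphBatch) error { return nil }, // stub MERGE
	}
	go w.run(context.Background(), func(error) bool { return false })
	_ = w.Enqueue(graphBatch{cypher: "MERGE (d:Device {device_id: 'sr:device-123'})"})
	time.Sleep(50 * time.Millisecond)
}
```

In practice the exec hook would issue the Cypher MERGE over the CNPG pool and the transient check would inspect the driver's SQLSTATE, but those details belong to the writer implementation, not this sketch.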
diff --git a/openspec/changes/stabilize-age-graph-ingestion/tasks.md b/openspec/changes/stabilize-age-graph-ingestion/tasks.md deleted file mode 100644 index 1df6b8986..000000000 --- a/openspec/changes/stabilize-age-graph-ingestion/tasks.md +++ /dev/null @@ -1,13 +0,0 @@ -## 1. Diagnostics and guardrails -- [ ] 1.1 Add structured logging/metrics that tag AGE errors (XX000 TM_Updated vs 57014 statement_timeout) with batch sizes and queue depth. -- [ ] 1.2 Surface configuration for AGE statement timeout/worker limits (env/config) with sane defaults for demo. - -## 2. Serialized and retried writer -- [ ] 2.1 Introduce a bounded work queue/worker that serializes AGE graph/interface/topology writes and applies chunk size limits. -- [ ] 2.2 Add retry/backoff for transient AGE errors (`Entity failed to be updated` XX000 and statement_timeout) with capped attempts and jitter. -- [ ] 2.3 Expose queue depth/backlog metrics/alerts and keep registry logs from spamming while still surfacing hard failures. - -## 3. Backfill coexistence and validation -- [ ] 3.1 Route age-backfill through the same queue or add coordination (mutex/flag) so rebuilds cannot run concurrent MERGEs against live ingestion. -- [ ] 3.2 Validate on demo: run age-backfill while pollers/agents publish updates; confirm no AGE XX000/statement timeout warnings and graph data persists. -- [ ] 3.3 Update the AGE runbook with contention troubleshooting and verification steps. diff --git a/openspec/changes/stabilize-device-ingestion-backpressure/proposal.md b/openspec/changes/stabilize-device-ingestion-backpressure/proposal.md deleted file mode 100644 index 3f6efae3c..000000000 --- a/openspec/changes/stabilize-device-ingestion-backpressure/proposal.md +++ /dev/null @@ -1,13 +0,0 @@ -# Change: Stabilize device ingestion when AGE graph backpressures the registry - -## Why -Core is logging AGE graph queue timeouts (`queue_wait_secs` ~120s, `context deadline exceeded`) and large non-canonical skips during stats aggregation while the inventory is stuck around 20k devices instead of the expected ~50k faker load. The stats cache shows CNPG reporting only ~3.3k devices while the in-memory registry holds ~16k, and ICMP capabilities for `k8s-agent` have disappeared. AGE writes are serialized and waited on synchronously in `ProcessBatchDeviceUpdates`, so the graph backlog stalls ingest traffic and lets registry/CNPG counts drift. - -## What Changes -- Decouple registry ingest from AGE graph execution with bounded, async graph dispatch so device updates cannot stall on the graph queue; add fast-fail/backoff when the graph path is unhealthy. -- Add parity diagnostics between CNPG and registry (stats cache + logs/metrics/alerts) with tolerances for faker-scale loads, and surface why records are skipped as non-canonical. -- Ensure service-device capability updates (ICMP for `k8s-agent`, etc.) persist even under graph backpressure, with a replay path for any dropped capability or graph batches. 
- -## Impact -- Affected specs: device-inventory, device-relationship-graph -- Affected code: pkg/core/stats_aggregator.go, pkg/registry/age_graph_writer.go, pkg/registry/registry.go, pkg/core/metrics.go, observability/alert wiring diff --git a/openspec/changes/stabilize-device-ingestion-backpressure/specs/device-inventory/spec.md b/openspec/changes/stabilize-device-ingestion-backpressure/specs/device-inventory/spec.md deleted file mode 100644 index 21a79d0d6..000000000 --- a/openspec/changes/stabilize-device-ingestion-backpressure/specs/device-inventory/spec.md +++ /dev/null @@ -1,22 +0,0 @@ -## ADDED Requirements -### Requirement: Device inventory stays consistent with CNPG during ingest backpressure -The system SHALL keep registry device counts within a configured tolerance of CNPG and emit an explicit alert when drift exceeds that tolerance during high-volume ingest. - -#### Scenario: Faker-scale ingest completes without registry/CNPG drift -- **WHEN** faker generates approximately 50,000 devices and stats aggregation runs while AGE graph writes are backpressured -- **THEN** registry total_devices matches CNPG counts within the configured tolerance (for example, within 1%) and does not silently fall below the expected scale. - -#### Scenario: Drift triggers alert with context -- **WHEN** registry total_devices deviates from CNPG beyond the tolerance -- **THEN** the system emits an alert/log that includes raw/processed counts and skipped_non_canonical figures so operators can triage the discrepancy. - -### Requirement: Service-device capabilities survive graph degradation -The system SHALL persist and surface capability snapshots for service devices (for example, ICMP for `k8s-agent`) even when AGE graph writes are delayed or failing, and SHALL provide replay or visibility for any skipped batches. - -#### Scenario: ICMP capability visible under AGE backlog -- **WHEN** ICMP results arrive for `k8s-agent` while the AGE graph queue is saturated or timing out -- **THEN** the ICMP capability snapshot is recorded and retrievable via the registry/UI within the same ingest pass, independent of graph success. - -#### Scenario: Recover skipped capability batches -- **WHEN** a capability or device-update batch is skipped or delayed because graph dispatch is offline -- **THEN** the system records the skipped batch and replays or exposes it for operator retry so capabilities and devices are not permanently dropped. diff --git a/openspec/changes/stabilize-device-ingestion-backpressure/specs/device-relationship-graph/spec.md b/openspec/changes/stabilize-device-ingestion-backpressure/specs/device-relationship-graph/spec.md deleted file mode 100644 index bb4d56ac0..000000000 --- a/openspec/changes/stabilize-device-ingestion-backpressure/specs/device-relationship-graph/spec.md +++ /dev/null @@ -1,14 +0,0 @@ -## ADDED Requirements -### Requirement: AGE graph writes do not stall device ingestion -The system SHALL decouple AGE graph writes from the synchronous registry ingest path so device updates complete even when the graph queue is saturated or timing out. - -#### Scenario: Registry ingest proceeds during graph backlog -- **WHEN** AGE graph queue depth grows and individual merges would exceed the request timeout -- **THEN** device ingest finishes without waiting for the blocked graph work, and the skipped graph batches are recorded for later replay. 
- -### Requirement: AGE graph backpressure is bounded and observable -The system SHALL bound AGE graph retries/queueing with metrics and alerts that surface queue depth, wait time, timeout counts, and dropped batches. - -#### Scenario: Operators see actionable AGE queue signals -- **WHEN** AGE graph merge attempts start timing out or being dropped because the queue is full -- **THEN** metrics/logs report queue depth, wait durations, and timeout counts with batch sizes so operators can react before ingestion is impacted. diff --git a/openspec/changes/stabilize-device-ingestion-backpressure/tasks.md b/openspec/changes/stabilize-device-ingestion-backpressure/tasks.md deleted file mode 100644 index a33db60ff..000000000 --- a/openspec/changes/stabilize-device-ingestion-backpressure/tasks.md +++ /dev/null @@ -1,14 +0,0 @@ -## 1. Diagnostics -- [ ] 1.1 Add registry vs CNPG parity metrics/logs (raw vs processed vs skipped_non_canonical) and alerts when drift exceeds tolerance on faker-scale loads. -- [ ] 1.2 Emit AGE graph queue/backpressure timing metrics with traceability to registry batches (queue depth, wait time, timeout counts). -- [ ] 1.3 Capture capability ingestion gaps for service devices (e.g., ICMP on `k8s-agent`) with targeted warnings and counters. - -## 2. Ingestion resilience -- [ ] 2.1 Decouple AGE graph writes from the synchronous registry path (fire-and-forget worker or callback) with bounded retries and fast failure when the queue is saturated. -- [ ] 2.2 Add guardrails so registry ingestion and capability snapshots proceed even when AGE is unhealthy; persist replay/backfill artifacts for skipped graph batches. -- [ ] 2.3 Harden non-canonical selection to avoid miscounts (large skipped_non_canonical bursts) and reconcile registry snapshots against CNPG totals. - -## 3. Validation -- [ ] 3.1 Load-test with faker 50k devices and confirm inventory returns ~50,002 devices without repeated AGE timeouts or registry/CNPG drift. -- [ ] 3.2 Verify ICMP capability for `k8s-agent` appears in registry/UI under AGE backpressure and after recovery/replay. -- [ ] 3.3 Document runbook updates for detecting and clearing AGE-induced ingest stalls. diff --git a/openspec/changes/stabilize-spire-admin-edge-onboarding/proposal.md b/openspec/changes/stabilize-spire-admin-edge-onboarding/proposal.md deleted file mode 100644 index 50b538632..000000000 --- a/openspec/changes/stabilize-spire-admin-edge-onboarding/proposal.md +++ /dev/null @@ -1,14 +0,0 @@ -# Change: Stabilize SPIRE admin for edge onboarding - -## Why -- Edge checker onboarding fails with 502 because Core cannot reach SPIRE admin (`x509: certificate signed by unknown authority`) while creating join tokens. -- SPIRE server PSAT allow-list does not include Core/Datasvc service accounts, so the admin mTLS handshake breaks and Core crashloops. - -## What Changes -- Update the Helm SPIRE server config to trust Core and Datasvc service accounts for k8s_psat attestation. -- Roll out SPIRE (server/agents) and restart Core/Web so the new trust settings take effect. -- Validate edge onboarding create-package flows after SPIRE trust is fixed. - -## Impact -- Affected specs: none (bugfix to existing SPIRE trust behavior). -- Affected code: Helm SPIRE server template, demo rollout procedure, edge onboarding create package path (Core/SPIRE admin). 
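The backpressure change above requires that device and capability ingest never wait on graph health. A small Go sketch of that decoupling, where graph dispatch failures land in a replay log instead of failing the ingest pass (all names are hypothetical, not the registry's real API):

```go
package main

import (
	"errors"
	"fmt"
)

var errGraphSaturated = errors.New("graph queue saturated")

type update struct{ deviceID, capability string }

// ingestUpdate persists the device/capability snapshot first and treats graph
// dispatch as best-effort: a saturated or unhealthy graph path records the
// batch for replay rather than stalling the caller. Hypothetical hooks only.
func ingestUpdate(u update, persistDevice, dispatchGraph func(update) error, replayLog *[]update) error {
	// 1. Registry/CNPG write happens unconditionally.
	if err := persistDevice(u); err != nil {
		return err
	}

	// 2. Graph write is best-effort; failures are recorded for later replay.
	if err := dispatchGraph(u); err != nil {
		*replayLog = append(*replayLog, u)
		fmt.Printf("graph dispatch deferred for %s: %v\n", u.deviceID, err)
	}

	return nil
}

func main() {
	var replay []update
	persist := func(update) error { return nil }
	dispatch := func(update) error { return errGraphSaturated } // simulate backpressure

	_ = ingestUpdate(update{deviceID: "default:10.0.0.5", capability: "icmp"}, persist, dispatch, &replay)
	fmt.Println("batches awaiting replay:", len(replay))
}
```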
diff --git a/openspec/changes/stabilize-spire-admin-edge-onboarding/tasks.md b/openspec/changes/stabilize-spire-admin-edge-onboarding/tasks.md deleted file mode 100644 index 24073f8f2..000000000 --- a/openspec/changes/stabilize-spire-admin-edge-onboarding/tasks.md +++ /dev/null @@ -1,8 +0,0 @@ -## 1. Implementation -- [x] 1.1 Add Core and Datasvc service accounts to the SPIRE server k8s_psat allow-list in Helm. -- [x] 1.2 Redeploy SPIRE (server/agents) and restart Core/Web so SPIRE admin mTLS works again. -- [ ] 1.3 Validate edge onboarding create-package (poller/agent/checker) succeeds without SPIRE admin TLS errors. - -## 2. Validation -- [x] 2.1 `helm status serviceradar -n demo` shows deployed/healthy after rollout. -- [ ] 2.2 Edge checker package creation returns 201 and no 502 in UI; core logs show join token creation succeeds. diff --git a/openspec/changes/update-helm-demo-chart/proposal.md b/openspec/changes/update-helm-demo-chart/proposal.md deleted file mode 100644 index b5153187b..000000000 --- a/openspec/changes/update-helm-demo-chart/proposal.md +++ /dev/null @@ -1,20 +0,0 @@ -# Change: Stabilize Helm Demo Deployment (secrets, config, NATS, storage) - -## Why -- Helm install for the demo namespace leaves critical services crashlooping (NATS mount failure, missing ConfigMap payloads, missing PVC/DB secrets). -- Proton and controller-manager components are unnecessary for the trimmed demo and create extra failures. - -## What Changes -- Fix `serviceradar-config` rendering so all keys (including `nats.conf`) ship to the cluster. -- Add/ensure required storage and secrets for core/CNPG consumers (core PVC, cnpg-superuser) and tighten secret generation to auth-only keys. -- Gate optional components (Proton, SPIRE controller manager) and fix NATS config mounting to unblock dependent pods. - -## Impact -- Affected code: `helm/serviceradar/templates/*`, `helm/serviceradar/files/serviceradar-config.yaml` -- Affected specs: none (operational/deployment fixes) - -## Current State (demo + demo-staging) -- Done: ConfigMap renders all keys (including `nats.conf`), NATS mounts via directory and is Ready, core PVC + `cnpg-superuser` secret provisioned, Proton disabled/removed from demo. SPIRE chart aligned with k8s/demo PSAT settings (token audience `spire-server`, projected SA tokens, controller manager enabled, ClusterSPIFFEIDs present). Agents issue SVIDs; datasvc is healthy with SPIFFE. -- Edge onboarding: secret-generator now seeds `edge-onboarding-key` (overrideable via values) and core consumes it; SPIRE alignment unblocks SVID fetch across services. -- CNPG: Core uses `spire` user/DB with CNPG CA mounted and reaches CNPG. The problematic device metrics CAGG has been rewritten into single-hypertable CAGGs (`device_metrics_summary_cpu|disk|memory`) plus a `device_metrics_summary` view; the SQL now applies successfully on Timescale 2.24. CNPG passwords come from `spire-db-credentials`/`cnpg-superuser` (currently “changeme”). -- Blockers cleared: Agent is stable (hostNetwork + `ClusterFirstWithHostNet`, bumped memory), db-event-writer healthy with CNPG CA/TLS, flowgger rebuilt on Debian testing (OpenSSL 3.2) and probes switched to TCP, and a CNPG init job now provisions the `serviceradar` DB/user/privileges. Demo is running image tag `1.0.56` for flowgger; remove the init container hack entirely. 
diff --git a/openspec/changes/update-helm-demo-chart/specs/helm-deploy/spec.md b/openspec/changes/update-helm-demo-chart/specs/helm-deploy/spec.md deleted file mode 100644 index 047009aa2..000000000 --- a/openspec/changes/update-helm-demo-chart/specs/helm-deploy/spec.md +++ /dev/null @@ -1,33 +0,0 @@ -## ADDED Requirements - -### Requirement: Helm demo chart renders complete shared config -The Helm deployment SHALL render and apply a `serviceradar-config` ConfigMap that includes all service configs, including `nats.conf`, when installing the demo stack. - -#### Scenario: ConfigMap contains NATS configuration -- **GIVEN** a Helm render using the demo values -- **WHEN** the manifest for `serviceradar-config` is generated -- **THEN** the data section includes a `nats.conf` key with server settings for NATS - -### Requirement: NATS uses directory-mounted config -The NATS deployment SHALL mount its ConfigMap to a directory path and point the server `--config` flag to that directory file to avoid subPath mount errors. - -#### Scenario: NATS starts with directory-mounted config -- **GIVEN** the NATS pod is created from the chart -- **WHEN** the pod starts -- **THEN** the container uses `/etc/nats-config/nats-server.conf` from a ConfigMap-mounted directory and the pod reaches Ready - -### Requirement: Demo chart provisions required storage and credentials -The Helm deployment SHALL create the core data PVC and a `cnpg-superuser` secret (username/password) so core and dependent workloads can bind storage and connect to CNPG without manual steps. - -#### Scenario: PVC and cnpg-superuser exist after install -- **GIVEN** the chart is installed into an empty namespace with the demo values -- **THEN** a PVC named `serviceradar-core-data` exists and is bound by core -- **AND** a secret `cnpg-superuser` exists with `username` and `password` keys - -### Requirement: Optional components are gated for demo installs -The Helm deployment SHALL allow Proton and the SPIRE controller manager to be disabled via values, with Proton disabled by default for the demo install. - -#### Scenario: Proton disabled in demo values -- **GIVEN** the demo values are applied -- **THEN** Proton is not deployed and its PVCs/RS are not created -- **AND** the SPIRE controller manager sidecar is not started when `enabled=false` diff --git a/openspec/changes/update-helm-demo-chart/tasks.md b/openspec/changes/update-helm-demo-chart/tasks.md deleted file mode 100644 index c13484572..000000000 --- a/openspec/changes/update-helm-demo-chart/tasks.md +++ /dev/null @@ -1,41 +0,0 @@ -# Tasks - -## 1. Implementation -- [x] Fix `serviceradar-config` templating so `nats.conf` and other config keys render and apply -- [x] Adjust NATS config mount to use directory-based config and verify pod starts (NATS now Ready) -- [x] Add missing PVC/secret defaults for core/CNPG consumers (core data PVC, cnpg-superuser secret) -- [x] Disable Proton by default in demo values and remove its deploy from the demo release (no Proton RS/pods) -- [x] Align SPIRE chart with demo manifests (PSAT, projected tokens, controller manager enabled by default for entry reconciliation) -- [x] Run `openspec validate update-helm-demo-chart --strict` - -## 2. 
Validation -- [x] Helm upgrade applies cleanly with the new ConfigMap (check `serviceradar-config` contains `nats.conf`) -- [x] NATS pod ready; dependent pods restartable (but still blocked on SPIRE SVIDs) -- [x] Core pod bound to its PVC and starts without Pending/Init errors (datasvc now obtains SVIDs and is Running; core unblocked by CAGG rebuild in demo; staging has the rebuilt CAGGs via migration 0005) -- [x] Agent pod starts and reaches KV (currently crashlooping: `failed to load config: ... name resolver error: produced zero addresses` in demo) -- [x] DB event writer starts with SPIFFE TLS (currently `x509svid: could not verify leaf certificate` when connecting in demo) -- [x] Flowgger image fixed to run on cluster OpenSSL (currently `libcrypto.so.3: OPENSSL_3.2.0 not found`) - -## 3. Cleanup -- [x] Remove Proton pod/RS from demo namespace (disabled by values) -- [x] Document the change in the proposal for review/approval - -## 4. Recent Fixes (db-event-writer) -- [x] Fixed SPIFFE bundle delivery: Added `SPIFFE_ENDPOINT_SOCKET` to `serviceradar-datasvc` deployment to ensure `go-spiffe` initializes correctly. -- [x] Aligned SPIFFE client envs: Helm now sets `SPIFFE_ENDPOINT_SOCKET` for `db-event-writer` so the workload API uses the mounted host socket; mirror this for any other SPIFFE clients if they land on hostNetwork nodes. -- [x] Fixed DB Authentication: Enabled `enableSuperuserAccess: true` in `spire-postgres.yaml` (CNPG Cluster) to allow `postgres` user authentication. -- [x] Fixed DB Configuration: Updated `db-event-writer` to use the correct `spire` database (via `CNPG_DATABASE` env var and ConfigMap update) instead of the old `telemetry` placeholder. - -## Notes / Current Blockers -- SPIRE chart mirrors k8s/demo settings (k8s_psat, token audience `spire-server`, projected SA tokens, controller manager on). Agents issue SVIDs; datasvc healthy with SPIFFE. -- Edge onboarding key now auto-generated/seeded via secret-generator job (and settable via values); core picks it up from `serviceradar-secrets`. -- Core connects to CNPG using `spire` user/DB with CNPG CA mounted; the device metrics CAGG SQL is now split into three single-hypertable CAGGs (`device_metrics_summary_cpu|disk|memory`) plus a joining view so Timescale 2.24 accepts it. Need to roll core with the updated migration bundle to clear the CrashLoop. -- Flowgger fixed by shipping a Debian testing-based image (`1.0.56`) with OpenSSL 3.2; liveness/readiness probes use TCP 50044 and the init hack was removed. -- `db-event-writer` is now healthy and processing messages. -- Agent uses `hostNetwork`; set `dnsPolicy: ClusterFirstWithHostNet` so cluster DNS resolves KV/Core endpoints when pulling config. Memory limits increased to 1Gi to avoid OOM. -- DB event writer now mounts the `cnpg-ca` secret and points its CNPG TLS CA file to `Values.cnpg.caFile` (defaults to `/etc/serviceradar/cnpg/ca.crt`) so SPIFFE Postgres connections can verify the CNPG server cert. -- Added a post-install/upgrade hook job to reseed the db-event-writer KV entry from the charted template using the KV certs, so the CNPG CA path in KV is corrected without manual edits. -- App database separation: Helm now defaults CNPG credentials to an app-specific `serviceradar` database/user/secret (no longer sharing `spire`/`postgres`). Manual psql was used to create `serviceradar` DB/user in demo; need idempotent Helm bootstrap (CNPG init SQL) so fresh installs provision the DB/user with the charted secret and grant schema privileges automatically. - -## 5. 
TODO (App DB bootstrap) -- [x] Add CNPG init SQL (or Helm job) that creates/owns the `serviceradar` database and user using `serviceradar-db-credentials`, grants schema/table/sequence privileges, and is idempotent on install/upgrade. diff --git a/pkg/checker/snmp/BUILD.bazel b/pkg/checker/snmp/BUILD.bazel index e7e2ee307..98f6f8a37 100644 --- a/pkg/checker/snmp/BUILD.bazel +++ b/pkg/checker/snmp/BUILD.bazel @@ -35,9 +35,12 @@ go_test( name = "snmp_test", srcs = [ "aggregator_test.go", + "client_conversion_test.go", "collector_test.go", + "service_deadlock_test.go", "service_test.go", ], + embedsrcs = ["service.go"], embed = [":snmp"], deps = [ "//pkg/logger", diff --git a/pkg/checker/snmp/client.go b/pkg/checker/snmp/client.go index 02b0592f5..8677a66a0 100644 --- a/pkg/checker/snmp/client.go +++ b/pkg/checker/snmp/client.go @@ -206,23 +206,21 @@ const defaultTimeTickDuration = time.Second / 100 func (*SNMPClientImpl) convertVariable(variable gosnmp.SnmpPDU) (interface{}, error) { // Map of SNMP types to conversion functions conversionMap := map[gosnmp.Asn1BER]func(gosnmp.SnmpPDU) interface{}{ - gosnmp.Boolean: convertBoolean, - gosnmp.BitString: convertBitString, - gosnmp.Null: convertNull, - gosnmp.ObjectDescription: convertObjectDescription, - gosnmp.Opaque: convertOpaque, - gosnmp.NsapAddress: convertNsapAddress, - gosnmp.Uinteger32: convertUinteger32, - gosnmp.OpaqueFloat: convertOpaqueFloat, - gosnmp.OpaqueDouble: convertOpaqueDouble, - gosnmp.Integer: convertInteger, - gosnmp.OctetString: convertOctetString, - gosnmp.ObjectIdentifier: convertObjectIdentifier, - gosnmp.IPAddress: convertIPAddress, - gosnmp.Counter32: convertCounter32Gauge32, - gosnmp.Gauge32: convertCounter32Gauge32, - gosnmp.Counter64: convertCounter64, - gosnmp.TimeTicks: convertTimeTicks, + gosnmp.Boolean: convertBoolean, + gosnmp.BitString: convertBitString, + gosnmp.Null: convertNull, + gosnmp.Opaque: convertOpaque, + gosnmp.NsapAddress: convertNsapAddress, + gosnmp.Uinteger32: convertUinteger32, + gosnmp.OpaqueFloat: convertOpaqueFloat, + gosnmp.OpaqueDouble: convertOpaqueDouble, + gosnmp.Integer: convertInteger, + gosnmp.ObjectIdentifier: convertObjectIdentifier, + gosnmp.IPAddress: convertIPAddress, + gosnmp.Counter32: convertCounter32Gauge32, + gosnmp.Gauge32: convertCounter32Gauge32, + gosnmp.Counter64: convertCounter64, + gosnmp.TimeTicks: convertTimeTicks, } // Check for types that need an error return @@ -243,6 +241,14 @@ func (*SNMPClientImpl) convertVariable(variable gosnmp.SnmpPDU) (interface{}, er return convertEndOfContents(variable) } + if variable.Type == gosnmp.ObjectDescription { + return convertObjectDescription(variable) + } + + if variable.Type == gosnmp.OctetString { + return convertOctetString(variable) + } + // Look up the appropriate conversion function if convertFunc, found := conversionMap[variable.Type]; found { return convertFunc(variable), nil @@ -264,8 +270,13 @@ func convertNull(gosnmp.SnmpPDU) interface{} { return nil } -func convertObjectDescription(variable gosnmp.SnmpPDU) interface{} { - return string(variable.Value.(byte)) +func convertObjectDescription(variable gosnmp.SnmpPDU) (interface{}, error) { + bytes, ok := variable.Value.([]byte) + if !ok { + return nil, fmt.Errorf("%w: ObjectDescription expected []byte, got %T", ErrSNMPConvert, variable.Value) + } + + return string(bytes), nil } func convertOpaque(variable gosnmp.SnmpPDU) interface{} { @@ -292,8 +303,13 @@ func convertInteger(variable gosnmp.SnmpPDU) interface{} { return variable.Value.(int) } -func 
convertOctetString(variable gosnmp.SnmpPDU) interface{} { - return string(variable.Value.(byte)) +func convertOctetString(variable gosnmp.SnmpPDU) (interface{}, error) { + bytes, ok := variable.Value.([]byte) + if !ok { + return nil, fmt.Errorf("%w: OctetString expected []byte, got %T", ErrSNMPConvert, variable.Value) + } + + return string(bytes), nil } func convertObjectIdentifier(variable gosnmp.SnmpPDU) interface{} { diff --git a/pkg/checker/snmp/client_conversion_test.go b/pkg/checker/snmp/client_conversion_test.go new file mode 100644 index 000000000..1afc7ec89 --- /dev/null +++ b/pkg/checker/snmp/client_conversion_test.go @@ -0,0 +1,95 @@ +/* + * Copyright 2025 Carver Automation Corporation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package snmp + +import ( + "testing" + + "github.com/gosnmp/gosnmp" + "github.com/stretchr/testify/require" +) + +func TestConvertVariable_OctetStringBytes(t *testing.T) { + client := &SNMPClientImpl{} + + variable := gosnmp.SnmpPDU{ + Name: ".1.3.6.1.2.1.1.1.0", + Type: gosnmp.OctetString, + Value: []byte("Test SNMP String"), + } + + value, err := client.convertVariable(variable) + require.NoError(t, err) + require.Equal(t, "Test SNMP String", value) +} + +func TestConvertVariable_ObjectDescriptionBytes(t *testing.T) { + client := &SNMPClientImpl{} + + variable := gosnmp.SnmpPDU{ + Name: ".1.3.6.1.2.1.1.1.0", + Type: gosnmp.ObjectDescription, + Value: []byte("Device OS v1.2.3"), + } + + value, err := client.convertVariable(variable) + require.NoError(t, err) + require.Equal(t, "Device OS v1.2.3", value) +} + +func TestConvertVariable_StringTypesUnexpectedValueDoNotPanic(t *testing.T) { + client := &SNMPClientImpl{} + + testCases := []struct { + name string + variable gosnmp.SnmpPDU + }{ + { + name: "OctetString byte", + variable: gosnmp.SnmpPDU{ + Name: ".1.3.6.1.2.1.1.1.0", + Type: gosnmp.OctetString, + Value: byte('x'), + }, + }, + { + name: "ObjectDescription string", + variable: gosnmp.SnmpPDU{ + Name: ".1.3.6.1.2.1.1.1.0", + Type: gosnmp.ObjectDescription, + Value: "not-bytes", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var ( + value interface{} + err error + ) + + require.NotPanics(t, func() { + value, err = client.convertVariable(tc.variable) + }) + + require.Nil(t, value) + require.Error(t, err) + require.ErrorIs(t, err, ErrSNMPConvert) + }) + } +} diff --git a/pkg/checker/snmp/service.go b/pkg/checker/snmp/service.go index ce2fa7178..b2b876f1b 100644 --- a/pkg/checker/snmp/service.go +++ b/pkg/checker/snmp/service.go @@ -29,10 +29,9 @@ import ( // Check implements the checker interface by returning the overall status of all SNMP targets. func (s *SNMPService) Check(ctx context.Context) (available bool, msg string) { - s.mu.RLock() - defer s.mu.RUnlock() - - // Re-using the GetStatus logic to get the detailed map + // NOTE: Avoid recursive RWMutex read locking. 
GetStatus performs its own locking and + // Check must not hold a read lock while calling GetStatus, otherwise a waiting writer + // can deadlock the service due to RWMutex write-preferring semantics. statusMap, err := s.GetStatus(ctx) if err != nil { return false, string(jsonError(fmt.Sprintf("Error getting detailed SNMP status: %v", err))) diff --git a/pkg/checker/snmp/service_deadlock_test.go b/pkg/checker/snmp/service_deadlock_test.go new file mode 100644 index 000000000..61dc51b71 --- /dev/null +++ b/pkg/checker/snmp/service_deadlock_test.go @@ -0,0 +1,101 @@ +/* + * Copyright 2025 Carver Automation Corporation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package snmp pkg/checker/snmp/service_deadlock_test.go +package snmp + +import ( + _ "embed" + "go/ast" + "go/parser" + "go/token" + "testing" +) + +//go:embed service.go +var snmpServiceSource string + +func TestCheckDoesNotRLockAndCallGetStatus(t *testing.T) { + t.Helper() + + fileSet := token.NewFileSet() + parsed, err := parser.ParseFile(fileSet, "service.go", snmpServiceSource, 0) + if err != nil { + t.Fatalf("parse service.go: %v", err) + } + + var checkDecl *ast.FuncDecl + for _, decl := range parsed.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok || funcDecl.Recv == nil || funcDecl.Name == nil { + continue + } + if funcDecl.Name.Name == "Check" { + checkDecl = funcDecl + break + } + } + + if checkDecl == nil || checkDecl.Body == nil || len(checkDecl.Recv.List) == 0 { + t.Fatal("Check method not found in service.go") + } + + if len(checkDecl.Recv.List[0].Names) == 0 || checkDecl.Recv.List[0].Names[0] == nil || checkDecl.Recv.List[0].Names[0].Name == "" { + t.Fatal("unable to determine Check receiver identifier in service.go") + } + receiverName := checkDecl.Recv.List[0].Names[0].Name + + var hasMuRLock bool + var hasGetStatusCall bool + + ast.Inspect(checkDecl.Body, func(node ast.Node) bool { + call, ok := node.(*ast.CallExpr) + if !ok { + return true + } + + selector, ok := call.Fun.(*ast.SelectorExpr) + if !ok || selector.Sel == nil { + return true + } + + if selector.Sel.Name == "GetStatus" { + if recv, ok := selector.X.(*ast.Ident); ok && recv.Name == receiverName { + hasGetStatusCall = true + } + return true + } + + if selector.Sel.Name != "RLock" { + return true + } + + muSelector, ok := selector.X.(*ast.SelectorExpr) + if !ok || muSelector.Sel == nil || muSelector.Sel.Name != "mu" { + return true + } + + if recv, ok := muSelector.X.(*ast.Ident); ok && recv.Name == receiverName { + hasMuRLock = true + } + + return true + }) + + if hasMuRLock && hasGetStatusCall { + t.Fatalf("Check must not call GetStatus while holding s.mu.RLock (recursive RWMutex read locking can deadlock)") + } +} diff --git a/pkg/core/BUILD.bazel b/pkg/core/BUILD.bazel index 41afff6aa..759814d3b 100644 --- a/pkg/core/BUILD.bazel +++ b/pkg/core/BUILD.bazel @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "core", srcs = [ - "backfill.go", "canonical_cache.go", "config.go", 
"alias_events.go", diff --git a/pkg/core/auth/BUILD.bazel b/pkg/core/auth/BUILD.bazel index c79467001..8c7a12725 100644 --- a/pkg/core/auth/BUILD.bazel +++ b/pkg/core/auth/BUILD.bazel @@ -28,7 +28,10 @@ go_library( go_test( name = "auth_test", - srcs = ["auth_test.go"], + srcs = [ + "auth_test.go", + "rbac_test.go", + ], embed = [":auth"], deps = [ "//pkg/db", diff --git a/pkg/core/auth/rbac.go b/pkg/core/auth/rbac.go index 474eb2b3b..eb090e2a4 100644 --- a/pkg/core/auth/rbac.go +++ b/pkg/core/auth/rbac.go @@ -76,11 +76,17 @@ func getRequiredRoles(path, method string, routeProtection map[string]interface{ // Check for exact match first if protection, exists := routeProtection[path]; exists { - return parseProtection(protection, method) + roles := parseProtection(protection, method) + if len(roles) > 0 { + return roles + } } // Check for wildcard matches for pattern, protection := range routeProtection { + if !strings.HasSuffix(pattern, "/*") { + continue + } if matchesPattern(path, pattern) { return parseProtection(protection, method) } @@ -136,12 +142,12 @@ func HasPermission(user *models.User, permission string, config *models.RBACConf if perm == "*" { return true } - + // Check exact match if perm == permission { return true } - + // Check category wildcard (e.g., "config:*" matches "config:read") if strings.HasSuffix(perm, ":*") { category := strings.TrimSuffix(perm, ":*") @@ -152,7 +158,7 @@ func HasPermission(user *models.User, permission string, config *models.RBACConf } } } - + return false } @@ -174,4 +180,4 @@ func PermissionMiddleware(permission string, config *models.RBACConfig) mux.Midd next.ServeHTTP(w, r) }) } -} \ No newline at end of file +} diff --git a/pkg/core/auth/rbac_test.go b/pkg/core/auth/rbac_test.go new file mode 100644 index 000000000..bd34eecaa --- /dev/null +++ b/pkg/core/auth/rbac_test.go @@ -0,0 +1,31 @@ +package auth + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetRequiredRoles_ExactMatchMethodMapFallsBackToWildcard(t *testing.T) { + routeProtection := map[string]interface{}{ + "/api/admin/*": []string{"admin"}, + "/api/admin/users": map[string]interface{}{ + "POST": []string{"superadmin"}, + }, + } + + roles := getRequiredRoles("/api/admin/users", "GET", routeProtection) + assert.Equal(t, []string{"admin"}, roles) +} + +func TestGetRequiredRoles_ExactMatchMethodMapOverridesWildcard(t *testing.T) { + routeProtection := map[string]interface{}{ + "/api/admin/*": []string{"admin"}, + "/api/admin/users": map[string]interface{}{ + "POST": []string{"superadmin"}, + }, + } + + roles := getRequiredRoles("/api/admin/users", "POST", routeProtection) + assert.Equal(t, []string{"superadmin"}, roles) +} diff --git a/pkg/core/backfill.go b/pkg/core/backfill.go deleted file mode 100644 index 92a57181b..000000000 --- a/pkg/core/backfill.go +++ /dev/null @@ -1,808 +0,0 @@ -package core - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/carverauto/serviceradar/pkg/db" - "github.com/carverauto/serviceradar/pkg/identitymap" - "github.com/carverauto/serviceradar/pkg/logger" - "github.com/carverauto/serviceradar/pkg/models" - "github.com/carverauto/serviceradar/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// identityRow holds a single unified_devices row with the identity key extracted. 
-type identityRow struct { - deviceID string - key string // identity key (armis_id or netbox_id) - kind identitymap.Kind - ts time.Time - ip string - metadata map[string]string -} - -// BackfillOptions controls how historical identity reconciliation is executed. -type BackfillOptions struct { - DryRun bool - SeedKVOnly bool - Namespace string -} - -func (o BackfillOptions) namespaceOrDefault() string { - ns := strings.TrimSpace(o.Namespace) - if ns == "" { - ns = identitymap.DefaultNamespace - } - return ns -} - -func cloneMetadata(src map[string]string) map[string]string { - if len(src) == 0 { - return map[string]string{} - } - - dst := make(map[string]string, len(src)) - for k, v := range src { - dst[k] = v - } - - return dst -} - -func (r identityRow) toDeviceUpdate() *models.DeviceUpdate { - if r.deviceID == "" { - return nil - } - - meta := cloneMetadata(r.metadata) - - update := &models.DeviceUpdate{ - DeviceID: r.deviceID, - IP: r.ip, - Partition: partitionFromDeviceID(r.deviceID), - Source: models.DiscoverySourceIntegration, - Timestamp: r.ts, - Metadata: meta, - IsAvailable: true, - } - - return update -} - -type kvSeeder struct { - client identityKVClient - namespace string - log logger.Logger -} - -func newKVSeeder(client identityKVClient, namespace string, log logger.Logger) *kvSeeder { - if client == nil { - return nil - } - - ns := strings.TrimSpace(namespace) - if ns == "" { - ns = identitymap.DefaultNamespace - } - - return &kvSeeder{client: client, namespace: ns, log: log} -} - -func (s *kvSeeder) seedRecord(ctx context.Context, record *identitymap.Record, keys []identitymap.Key, dryRun bool) (map[identitymap.Key]bool, error) { - if s == nil || s.client == nil || record == nil || len(keys) == 0 { - return nil, nil - } - - payload, err := identitymap.MarshalRecord(record) - if err != nil { - return nil, err - } - - matched := make(map[identitymap.Key]bool, len(keys)) - var seedErr error - - for _, key := range keys { - keyPath := key.KeyPath(s.namespace) - - resp, err := s.client.Get(ctx, &proto.GetRequest{Key: keyPath}) - if err != nil { - seedErr = errors.Join(seedErr, fmt.Errorf("kv get %s: %w", keyPath, err)) - continue - } - - if !resp.GetFound() || len(resp.GetValue()) == 0 { - matched[key] = false - - if dryRun { - identitymap.RecordKVPublish(ctx, 1, "dry_run") - continue - } - - if _, err := s.client.PutIfAbsent(ctx, &proto.PutRequest{Key: keyPath, Value: payload}); err != nil { - code := status.Code(err) - if code == codes.Aborted || code == codes.AlreadyExists { - identitymap.RecordKVConflict(ctx, code.String()) - if s.log != nil { - s.log.Debug().Str("key", keyPath).Str("reason", code.String()).Msg("Backfill KV create encountered conflict") - } - } - seedErr = errors.Join(seedErr, fmt.Errorf("kv put %s: %w", keyPath, err)) - continue - } - - identitymap.RecordKVPublish(ctx, 1, "created") - if s.log != nil { - s.log.Debug().Str("key", keyPath).Msg("Backfill created canonical identity entry in KV") - } - - continue - } - - existing, err := identitymap.UnmarshalRecord(resp.GetValue()) - if err != nil { - if errors.Is(err, identitymap.ErrCorruptRecord) { - matched[key] = false - if s.log != nil { - s.log.Warn().Str("key", keyPath).Err(err).Msg("Backfill replacing corrupt canonical identity entry in KV") - } - } else { - seedErr = errors.Join(seedErr, fmt.Errorf("kv unmarshal %s: %w", keyPath, err)) - continue - } - } else { - if existing.CanonicalDeviceID == record.CanonicalDeviceID && existing.MetadataHash == record.MetadataHash { - matched[key] = true - 
identitymap.RecordKVPublish(ctx, 1, "unchanged") - continue - } - - matched[key] = false - } - - if dryRun { - identitymap.RecordKVPublish(ctx, 1, "dry_run") - continue - } - - if _, err := s.client.Update(ctx, &proto.UpdateRequest{Key: keyPath, Value: payload, Revision: resp.GetRevision()}); err != nil { - code := status.Code(err) - if code == codes.Aborted || code == codes.AlreadyExists { - identitymap.RecordKVConflict(ctx, code.String()) - if s.log != nil { - s.log.Debug().Str("key", keyPath).Str("reason", code.String()).Msg("Backfill KV update encountered conflict") - } - } - seedErr = errors.Join(seedErr, fmt.Errorf("kv update %s: %w", keyPath, err)) - continue - } - - identitymap.RecordKVPublish(ctx, 1, "updated") - if s.log != nil { - s.log.Debug().Str("key", keyPath).Msg("Backfill updated canonical identity entry in KV") - } - } - - return matched, seedErr -} - -func buildIdentityRecord(update *models.DeviceUpdate) *identitymap.Record { - if update == nil { - return nil - } - - return &identitymap.Record{ - CanonicalDeviceID: update.DeviceID, - Partition: update.Partition, - MetadataHash: identitymap.HashIdentityMetadata(update), - UpdatedAt: time.Now().UTC(), - Attributes: buildIdentityAttributes(update), - } -} - -func buildIdentityAttributes(update *models.DeviceUpdate) map[string]string { - if update == nil { - return nil - } - - attrs := map[string]string{} - - if update.IP != "" { - attrs["ip"] = update.IP - } - - if update.Partition != "" { - attrs["partition"] = update.Partition - } - - if update.Hostname != nil { - if name := strings.TrimSpace(*update.Hostname); name != "" { - attrs["hostname"] = name - } - } - - if src := strings.TrimSpace(string(update.Source)); src != "" { - attrs["source"] = src - } - - if len(attrs) == 0 { - return nil - } - - return attrs -} - -type identityBackfillStats struct { - totalCandidates int - totalGroups int - totalTombstones int - skippedByKV int -} - -func processIdentityRows( - ctx context.Context, - rows []identityRow, - seeder *kvSeeder, - opts BackfillOptions, - emit func(*models.DeviceUpdate) error, - log logger.Logger, - stats *identityBackfillStats, -) error { - if len(rows) == 0 { - return nil - } - - stats.totalCandidates += len(rows) - - groups := make(map[string][]identityRow) - for _, row := range rows { - if row.key == "" || row.deviceID == "" { - continue - } - groups[row.key] = append(groups[row.key], row) - } - - for key, members := range groups { - if len(members) <= 1 { - continue - } - - stats.totalGroups++ - - canonical := members[0] - for _, candidate := range members[1:] { - if candidate.ts.After(canonical.ts) { - canonical = candidate - } - } - - canonicalUpdate := canonical.toDeviceUpdate() - record := buildIdentityRecord(canonicalUpdate) - - var matches map[identitymap.Key]bool - if seeder != nil && record != nil { - seedMatches, seedErr := seeder.seedRecord(ctx, record, identitymap.BuildKeys(canonicalUpdate), opts.DryRun) - if seedErr != nil { - log.Warn(). - Err(seedErr). - Str("identity_key", key). 
- Msg("Backfill: failed to seed canonical identity in KV") - } - matches = seedMatches - } - - for _, member := range members { - if member.deviceID == canonical.deviceID { - continue - } - - skip := opts.SeedKVOnly - if !skip && matches != nil { - targetKey := identitymap.Key{Kind: canonical.kind, Value: key} - if matched, ok := matches[targetKey]; ok && matched { - stats.skippedByKV++ - skip = true - } - } - - if skip { - continue - } - - stats.totalTombstones++ - - tombstone := &models.DeviceUpdate{ - DeviceID: member.deviceID, - Partition: partitionFromDeviceID(member.deviceID), - IP: member.ip, - Source: models.DiscoverySourceIntegration, - Timestamp: time.Now(), - IsAvailable: false, - Metadata: map[string]string{"_merged_into": canonical.deviceID}, - } - - log.Info(). - Str("identity_key", key). - Str("from_id", member.deviceID). - Str("to_id", canonical.deviceID). - Msg("Backfill: tombstoning duplicate device") - - if err := emit(tombstone); err != nil { - return err - } - } - } - - return nil -} - -// BackfillIdentityTombstones scans unified_devices for duplicate device_ids that share -// a strong identity (Armis ID or NetBox ID) and reconciles them against the canonical -// identity map. When the KV already points at the canonical device the tombstone is -// skipped, making the job idempotent. Optionally the job can perform KV seeding only. -// -//nolint:gocognit,funlen // historical backfill logic remains complex -func BackfillIdentityTombstones(ctx context.Context, database db.Service, kvClient identityKVClient, log logger.Logger, opts BackfillOptions) error { - namespace := opts.namespaceOrDefault() - seeder := newKVSeeder(kvClient, namespace, log) - - const chunkSize = 500 - tombBatch := make([]*models.DeviceUpdate, 0, chunkSize) - stats := identityBackfillStats{} - - emit := func(update *models.DeviceUpdate) error { - if update == nil { - return nil - } - if opts.DryRun || opts.SeedKVOnly { - return nil - } - - tombBatch = append(tombBatch, update) - if len(tombBatch) < chunkSize { - return nil - } - - if err := database.PublishBatchDeviceUpdates(ctx, tombBatch); err != nil { - return fmt.Errorf("publish tombstones: %w", err) - } - - tombBatch = tombBatch[:0] - return nil - } - - process := func(rows []identityRow) error { - return processIdentityRows(ctx, rows, seeder, opts, emit, log, &stats) - } - - armisRows, err := queryIdentityRows(ctx, database, ` - SELECT device_id, ip, metadata, metadata['armis_device_id'] AS key, _tp_time - FROM table(unified_devices) - WHERE has(map_keys(metadata), 'armis_device_id') - AND NOT has(map_keys(metadata), '_merged_into')`, identitymap.KindArmisID) - if err != nil { - return err - } - - if err := process(armisRows); err != nil { - return err - } - - netboxRows, err := queryIdentityRows(ctx, database, ` - SELECT device_id, ip, metadata, - if(has(map_keys(metadata),'integration_id'), metadata['integration_id'], metadata['netbox_device_id']) AS key, - _tp_time - FROM table(unified_devices) - WHERE has(map_keys(metadata), 'integration_type') AND metadata['integration_type'] = 'netbox' - AND (has(map_keys(metadata),'integration_id') OR has(map_keys(metadata),'netbox_device_id')) - AND NOT has(map_keys(metadata), '_merged_into')`, identitymap.KindNetboxID) - if err != nil { - return err - } - - if err := process(netboxRows); err != nil { - return err - } - - if len(tombBatch) > 0 { - if err := database.PublishBatchDeviceUpdates(ctx, tombBatch); err != nil { - return fmt.Errorf("publish tombstones: %w", err) - } - } - - if opts.DryRun { - 
log.Info(). - Bool("dry_run", true). - Int("candidate_rows", stats.totalCandidates). - Int("duplicate_groups", stats.totalGroups). - Int("tombstones_would_emit", stats.totalTombstones). - Int("kv_identity_skipped", stats.skippedByKV). - Msg("Identity backfill DRY-RUN completed") - - return nil - } - - if opts.SeedKVOnly { - log.Info(). - Int("candidate_rows", stats.totalCandidates). - Int("duplicate_groups", stats.totalGroups). - Int("kv_identity_skipped", stats.skippedByKV). - Msg("Identity backfill completed with KV seeding only") - return nil - } - - log.Info(). - Int("candidate_rows", stats.totalCandidates). - Int("duplicate_groups", stats.totalGroups). - Int("tombstones_emitted", stats.totalTombstones). - Int("kv_identity_skipped", stats.skippedByKV). - Msg("Identity backfill completed") - - return nil -} - -// BackfillIPAliasTombstones finds sweep-only device_ids by IP for canonical identity devices -// (Armis/NetBox) and reconciles them, optionally seeding the canonical identity map for the -// partition:ip keys. Like BackfillIdentityTombstones, it skips tombstones when the KV already -// reflects the canonical device, making the workflow idempotent. -// -//nolint:gocognit,gocyclo,funlen // legacy backfill logic remains complex -func BackfillIPAliasTombstones(ctx context.Context, database db.Service, kvClient identityKVClient, log logger.Logger, opts BackfillOptions) error { - namespace := opts.namespaceOrDefault() - seeder := newKVSeeder(kvClient, namespace, log) - - type canonical struct { - deviceID string - partition string - ip string - meta map[string]string - } - - buildCanonicalUpdate := func(c canonical) *models.DeviceUpdate { - if c.deviceID == "" { - return nil - } - - return &models.DeviceUpdate{ - DeviceID: c.deviceID, - Partition: c.partition, - IP: c.ip, - Source: models.DiscoverySourceIntegration, - Metadata: cloneMetadata(c.meta), - } - } - - // 1) Fetch canonical devices that have strong identity - rows, err := database.ExecuteQuery(ctx, ` - SELECT device_id, ip, metadata, _tp_time - FROM table(unified_devices) - WHERE (has(map_keys(metadata),'armis_device_id') - OR (has(map_keys(metadata),'integration_type') AND metadata['integration_type']='netbox')) - AND NOT has(map_keys(metadata),'_merged_into') - ORDER BY _tp_time DESC`) - if err != nil { - return fmt.Errorf("query canonical devices failed: %w", err) - } - - // 2) Build canonical list with partitions and IP sets - cands := make([]canonical, 0, len(rows)) - - for _, r := range rows { - dev, _ := r["device_id"].(string) - if dev == "" { - continue - } - - ip, _ := r["ip"].(string) - part := partitionFromDeviceID(dev) - - var meta map[string]string - - switch m := r["metadata"].(type) { - case map[string]string: - meta = m - case map[string]interface{}: - meta = make(map[string]string, len(m)) - - for k, v := range m { - if s, ok := v.(string); ok { - meta[k] = s - } - } - default: - meta = map[string]string{} - } - - cands = append(cands, canonical{deviceID: dev, partition: part, ip: ip, meta: meta}) - } - - canonDetails := make(map[string]canonical, len(cands)) - for _, c := range cands { - canonDetails[c.deviceID] = c - } - - // 3) Build a set of target duplicate device_ids per canonical - // and verify existence to avoid creating tombstones for non-existent IDs - var allTargets []string - - canonToTargets := make(map[string][]string) - - for _, c := range cands { - ipSet := make(map[string]struct{}) - if c.ip != "" { - ipSet[c.ip] = struct{}{} - } - // Parse all_ips comma-separated - if s, ok := 
c.meta["all_ips"]; ok && s != "" { - for _, tok := range strings.Split(s, ",") { - t := strings.TrimSpace(tok) - if t != "" { - ipSet[t] = struct{}{} - } - } - } - // Parse alt_ip: keys - for k := range c.meta { - if strings.HasPrefix(k, "alt_ip:") { - ip := strings.TrimPrefix(k, "alt_ip:") - if ip != "" { - ipSet[ip] = struct{}{} - } - } - } - // Build device_ids - for ip := range ipSet { - id := c.partition + ":" + ip - if id == c.deviceID { - continue - } - - canonToTargets[c.deviceID] = append(canonToTargets[c.deviceID], id) - allTargets = append(allTargets, id) - } - } - - if len(allTargets) == 0 { - log.Info().Msg("IP backfill: no alias targets found") - return nil - } - - // 4) Check which target device_ids actually exist and are not already merged - existing := make(map[string]struct{}) - - const targetChunk = 1000 - - for i := 0; i < len(allTargets); i += targetChunk { - end := i + targetChunk - if end > len(allTargets) { - end = len(allTargets) - } - - list := quoteList(allTargets[i:end]) - q := `SELECT device_id FROM table(unified_devices) - WHERE device_id IN (` + list + `) - AND NOT has(map_keys(metadata),'_merged_into') - ORDER BY _tp_time DESC` - - res, err := database.ExecuteQuery(ctx, q) - if err != nil { - return fmt.Errorf("query existing targets failed: %w", err) - } - - for _, r := range res { - if dev, ok := r["device_id"].(string); ok && dev != "" { - existing[dev] = struct{}{} - } - } - } - - // 5) Emit tombstones for existing, non-canonical target IDs - var tombstones []*models.DeviceUpdate - - const tombstoneChunk = 1000 - - emit := func(update *models.DeviceUpdate) error { - if update == nil { - return nil - } - if opts.DryRun || opts.SeedKVOnly { - return nil - } - - tombstones = append(tombstones, update) - if len(tombstones) < tombstoneChunk { - return nil - } - - if err := database.PublishBatchDeviceUpdates(ctx, tombstones); err != nil { - return fmt.Errorf("publish ip tombstones: %w", err) - } - tombstones = tombstones[:0] - return nil - } - - var emitted int - skippedByKV := 0 - - for canon, targets := range canonToTargets { - part := partitionFromDeviceID(canon) - - info, ok := canonDetails[canon] - if !ok { - info = canonical{deviceID: canon, partition: part} - } - - update := buildCanonicalUpdate(info) - if update == nil { - continue - } - - record := buildIdentityRecord(update) - if seeder != nil && record != nil { - if _, seedErr := seeder.seedRecord(ctx, record, identitymap.BuildKeys(update), opts.DryRun); seedErr != nil { - log.Warn(). - Err(seedErr). - Str("canonical_device", canon). - Msg("IP backfill: failed to seed canonical identity keys") - } - } - - for _, t := range targets { - if _, ok := existing[t]; !ok { - continue - } - // Build tombstone - aliasKey := identitymap.Key{Kind: identitymap.KindPartitionIP, Value: t} - aliasMatched := false - - if seeder != nil && record != nil { - if matches, seedErr := seeder.seedRecord(ctx, record, []identitymap.Key{aliasKey}, opts.DryRun); seedErr != nil { - log.Warn(). - Err(seedErr). - Str("alias_device", t). - Str("canonical_device", canon). 
- Msg("IP backfill: failed to seed partition-ip identity") - } else if matches != nil && matches[aliasKey] { - aliasMatched = true - } - } - - skip := opts.SeedKVOnly || aliasMatched - if skip { - if aliasMatched { - skippedByKV++ - } - continue - } - - emitted++ - tombstone := &models.DeviceUpdate{ - DeviceID: t, - Partition: part, - Source: models.DiscoverySourceIntegration, - Timestamp: time.Now(), - IsAvailable: false, - Metadata: map[string]string{"_merged_into": canon}, - } - - if err := emit(tombstone); err != nil { - return err - } - } - } - - if opts.DryRun { - log.Info(). - Int("ip_alias_tombstones_would_emit", emitted). - Int("kv_identity_skipped", skippedByKV). - Msg("IP backfill DRY-RUN completed") - return nil - } - - if len(tombstones) > 0 { - if opts.SeedKVOnly { - tombstones = tombstones[:0] - } else if err := database.PublishBatchDeviceUpdates(ctx, tombstones); err != nil { - return fmt.Errorf("publish ip tombstones: %w", err) - } - } - - if opts.SeedKVOnly { - log.Info(). - Int("kv_identity_skipped", skippedByKV). - Msg("IP backfill completed with KV seeding only") - return nil - } - - log.Info(). - Int("ip_alias_tombstones_emitted", emitted). - Int("kv_identity_skipped", skippedByKV). - Msg("IP backfill completed") - - return nil -} -func queryIdentityRows(ctx context.Context, database db.Service, sql string, kind identitymap.Kind) ([]identityRow, error) { - results, err := database.ExecuteQuery(ctx, sql) - if err != nil { - return nil, fmt.Errorf("identity query failed: %w", err) - } - - rows := make([]identityRow, 0, len(results)) - - for _, r := range results { - rd := identityRow{kind: kind} - if v, ok := r["device_id"].(string); ok { - rd.deviceID = v - } - - if v, ok := r["ip"].(string); ok { - rd.ip = v - } - // key may be nil if metadata is malformed; skip in builder - if v, ok := r["key"].(string); ok { - rd.key = v - } - - switch meta := r["metadata"].(type) { - case map[string]string: - rd.metadata = cloneMetadata(meta) - case map[string]interface{}: - converted := make(map[string]string, len(meta)) - for k, v := range meta { - if s, ok := v.(string); ok { - converted[k] = s - } - } - rd.metadata = converted - case nil: - rd.metadata = map[string]string{} - default: - rd.metadata = map[string]string{} - } - - switch t := r["_tp_time"].(type) { - case time.Time: - rd.ts = t - default: - rd.ts = time.Now() - } - - rows = append(rows, rd) - } - - return rows, nil -} - -func partitionFromDeviceID(deviceID string) string { - parts := strings.Split(deviceID, ":") - if len(parts) >= 2 { - return parts[0] - } - - return "default" -} - -// quoteList converts a list of string literals to a safely quoted IN(...) 
list -func quoteList(vals []string) string { - if len(vals) == 0 { - return "''" - } - - var b strings.Builder - - for i, v := range vals { - if i > 0 { - b.WriteString(",") - } - - b.WriteString("'") - b.WriteString(strings.ReplaceAll(v, "'", "''")) - b.WriteString("'") - } - - return b.String() -} diff --git a/pkg/core/backfill_test.go b/pkg/core/backfill_test.go deleted file mode 100644 index 04afa3f1c..000000000 --- a/pkg/core/backfill_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package core - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" - - "github.com/carverauto/serviceradar/pkg/db" - "github.com/carverauto/serviceradar/pkg/identitymap" - "github.com/carverauto/serviceradar/pkg/logger" - "github.com/carverauto/serviceradar/pkg/models" - syncpkg "github.com/carverauto/serviceradar/pkg/sync" - "github.com/carverauto/serviceradar/proto" -) - -func TestBackfillIdentityTombstonesSeedKVOnlySkipsPublishing(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockDB := db.NewMockService(ctrl) - mockKV := syncpkg.NewMockKVClient(ctrl) - log := logger.NewTestLogger() - - now := time.Now() - - armisRows := []map[string]interface{}{ - { - "device_id": "default:canonical", - "ip": "10.0.0.1", - "metadata": map[string]interface{}{ - "armis_device_id": "ARM-1", - }, - "key": "ARM-1", - "_tp_time": now.Add(time.Minute), - }, - { - "device_id": "default:duplicate", - "ip": "10.0.0.2", - "metadata": map[string]interface{}{ - "armis_device_id": "ARM-1", - }, - "key": "ARM-1", - "_tp_time": now, - }, - } - - gomock.InOrder( - mockDB.EXPECT().ExecuteQuery(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, query string, _ ...interface{}) ([]map[string]interface{}, error) { - require.Contains(t, query, "armis_device_id") - return armisRows, nil - }), - mockDB.EXPECT().ExecuteQuery(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, query string, _ ...interface{}) ([]map[string]interface{}, error) { - require.Contains(t, query, "integration_type") - return []map[string]interface{}{}, nil - }), - ) - - mockDB.EXPECT().PublishBatchDeviceUpdates(gomock.Any(), gomock.Any()).Times(0) - - mockKV.EXPECT().Get(gomock.Any(), gomock.Any()).Return(&proto.GetResponse{Found: false}, nil).Times(4) - mockKV.EXPECT().PutIfAbsent(gomock.Any(), gomock.Any()).Return(&proto.PutResponse{}, nil).Times(4) - mockKV.EXPECT().Update(gomock.Any(), gomock.Any()).Times(0) - - opts := BackfillOptions{SeedKVOnly: true} - - err := BackfillIdentityTombstones(context.Background(), mockDB, mockKV, log, opts) - require.NoError(t, err) -} - -func TestBackfillIdentityTombstonesPublishesWhenKVOutdated(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockDB := db.NewMockService(ctrl) - mockKV := syncpkg.NewMockKVClient(ctrl) - log := logger.NewTestLogger() - - now := time.Now() - - armisRows := []map[string]interface{}{ - { - "device_id": "default:canonical", - "ip": "10.0.0.1", - "metadata": map[string]interface{}{ - "armis_device_id": "ARM-1", - }, - "key": "ARM-1", - "_tp_time": now.Add(time.Minute), - }, - { - "device_id": "default:duplicate", - "ip": "10.0.0.2", - "metadata": map[string]interface{}{ - "armis_device_id": "ARM-1", - }, - "key": "ARM-1", - "_tp_time": now, - }, - } - - gomock.InOrder( - mockDB.EXPECT().ExecuteQuery(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, query string, _ ...interface{}) ([]map[string]interface{}, error) { - 
require.Contains(t, query, "armis_device_id") - return armisRows, nil - }), - mockDB.EXPECT().ExecuteQuery(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, query string, _ ...interface{}) ([]map[string]interface{}, error) { - require.Contains(t, query, "integration_type") - return []map[string]interface{}{}, nil - }), - ) - - staleRecord := &identitymap.Record{CanonicalDeviceID: "default:stale", Partition: "default", MetadataHash: "stale"} - stalePayload, err := identitymap.MarshalRecord(staleRecord) - require.NoError(t, err) - - mockKV.EXPECT().Get(gomock.Any(), gomock.Any()).Return(&proto.GetResponse{Found: true, Revision: 2, Value: stalePayload}, nil).Times(4) - mockKV.EXPECT().PutIfAbsent(gomock.Any(), gomock.Any()).Times(0) - mockKV.EXPECT().Update(gomock.Any(), gomock.Any()).Return(&proto.UpdateResponse{}, nil).Times(4) - - mockDB.EXPECT().PublishBatchDeviceUpdates(gomock.Any(), gomock.Len(1)).DoAndReturn( - func(_ context.Context, updates []*models.DeviceUpdate) error { - require.Len(t, updates, 1) - require.Equal(t, "default:duplicate", updates[0].DeviceID) - require.Equal(t, "default:canonical", updates[0].Metadata["_merged_into"]) - return nil - }).Times(1) - - opts := BackfillOptions{} - - err = BackfillIdentityTombstones(context.Background(), mockDB, mockKV, log, opts) - require.NoError(t, err) -} - -func TestBackfillIPAliasTombstonesSkipsWhenKVCanonical(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockDB := db.NewMockService(ctrl) - mockKV := syncpkg.NewMockKVClient(ctrl) - log := logger.NewTestLogger() - - canonicalMeta := map[string]interface{}{ - "armis_device_id": "ARM-1", - "all_ips": "10.0.0.2", - } - - canonicalRows := []map[string]interface{}{ - { - "device_id": "default:canonical", - "ip": "10.0.0.1", - "metadata": canonicalMeta, - "_tp_time": time.Now(), - }, - } - - gomock.InOrder( - mockDB.EXPECT().ExecuteQuery(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, query string, _ ...interface{}) ([]map[string]interface{}, error) { - require.Contains(t, query, "metadata") - return canonicalRows, nil - }), - mockDB.EXPECT().ExecuteQuery(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, query string, _ ...interface{}) ([]map[string]interface{}, error) { - require.Contains(t, query, "device_id FROM table(unified_devices)") - aliasQueryResult := []map[string]interface{}{{"device_id": "default:10.0.0.2"}} - return aliasQueryResult, nil - }), - ) - mockDB.EXPECT().PublishBatchDeviceUpdates(gomock.Any(), gomock.Any()).Times(0) - - record := &identitymap.Record{ - CanonicalDeviceID: "default:canonical", - Partition: "default", - MetadataHash: identitymap.HashIdentityMetadata(&models.DeviceUpdate{ - DeviceID: "default:canonical", - Partition: "default", - IP: "10.0.0.1", - Source: models.DiscoverySourceIntegration, - Metadata: map[string]string{"armis_device_id": "ARM-1", "all_ips": "10.0.0.2"}, - }), - } - payload, err := identitymap.MarshalRecord(record) - require.NoError(t, err) - - mockKV.EXPECT().Get(gomock.Any(), gomock.Any()).Return(&proto.GetResponse{Found: true, Value: payload, Revision: 3}, nil).AnyTimes() - mockKV.EXPECT().PutIfAbsent(gomock.Any(), gomock.Any()).Times(0) - mockKV.EXPECT().Update(gomock.Any(), gomock.Any()).Times(0) - - opts := BackfillOptions{} - - err = BackfillIPAliasTombstones(context.Background(), mockDB, mockKV, log, opts) - require.NoError(t, err) -} diff --git a/pkg/core/identity_lookup.go b/pkg/core/identity_lookup.go index be4091795..17b48a86b 100644 
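With the identity backfill jobs and their KV seeding removed above, canonical identity is now resolved on demand through `GetCanonicalDevice`, which the hunks below rewire to query CNPG directly. A minimal caller-side sketch of the new lookup, mirroring the updated tests (the `canonicalLookup` interface is an assumed stand-in for `*core.Server` or an equivalent client):

```go
package identityexample

import (
	"context"
	"fmt"

	"github.com/carverauto/serviceradar/pkg/identitymap"
	"github.com/carverauto/serviceradar/proto"
	identitymappb "github.com/carverauto/serviceradar/proto/identitymap/v1"
)

// canonicalLookup is a hypothetical stand-in for *core.Server (or an equivalent
// client) exposing the CNPG-backed canonical device lookup.
type canonicalLookup interface {
	GetCanonicalDevice(ctx context.Context, req *proto.GetCanonicalDeviceRequest) (*proto.GetCanonicalDeviceResponse, error)
}

// resolveByArmisID resolves the canonical device ID for a strong Armis identity key.
func resolveByArmisID(ctx context.Context, lookup canonicalLookup, armisID string) (string, error) {
	req := &proto.GetCanonicalDeviceRequest{
		IdentityKeys: []*identitymappb.IdentityKey{
			identitymap.Key{Kind: identitymap.KindArmisID, Value: armisID}.ToProto(),
		},
	}

	resp, err := lookup.GetCanonicalDevice(ctx, req)
	if err != nil {
		return "", err
	}

	if !resp.GetFound() {
		return "", fmt.Errorf("no canonical device for armis id %q", armisID)
	}

	return resp.GetRecord().GetCanonicalDeviceId(), nil
}
```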
--- a/pkg/core/identity_lookup.go +++ b/pkg/core/identity_lookup.go @@ -9,7 +9,6 @@ import ( "go.opentelemetry.io/otel/attribute" otelcodes "go.opentelemetry.io/otel/codes" - "google.golang.org/grpc" grpccodes "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -23,14 +22,8 @@ var ( errUnsupportedIdentityKind = errors.New("identity lookup: unsupported identity kind") ) -type identityKVClient interface { - Get(ctx context.Context, in *proto.GetRequest, opts ...grpc.CallOption) (*proto.GetResponse, error) - PutIfAbsent(ctx context.Context, in *proto.PutRequest, opts ...grpc.CallOption) (*proto.PutResponse, error) - Update(ctx context.Context, in *proto.UpdateRequest, opts ...grpc.CallOption) (*proto.UpdateResponse, error) - Delete(ctx context.Context, in *proto.DeleteRequest, opts ...grpc.CallOption) (*proto.DeleteResponse, error) -} - -// GetCanonicalDevice resolves a set of identity keys to the canonical device record maintained in KV. +// GetCanonicalDevice resolves a set of identity keys to the canonical device record via CNPG. +// KV is not used for identity resolution - CNPG is the authoritative source. func (s *Server) GetCanonicalDevice(ctx context.Context, req *proto.GetCanonicalDeviceRequest) (*proto.GetCanonicalDeviceResponse, error) { ctx, span := s.tracer.Start(ctx, "GetCanonicalDevice") defer span.End() @@ -47,41 +40,14 @@ func (s *Server) GetCanonicalDevice(ctx context.Context, req *proto.GetCanonical return nil, status.Error(grpccodes.InvalidArgument, "identity keys are required") } - namespace := strings.TrimSpace(req.GetNamespace()) - if namespace == "" { - namespace = identitymap.DefaultNamespace - } - span.SetAttributes( attribute.Int("identity.count", len(req.GetIdentityKeys())), - attribute.String("namespace", namespace), ) // Normalize identity keys and append optional IP hint if provided. keys := normalizeIdentityKeys(req) - // Attempt KV lookups in order. - for _, key := range keys { - rec, revision, err := s.lookupIdentityFromKV(ctx, namespace, key) - if err != nil { - s.logger.Warn().Err(err).Str("key", key.KeyPath(namespace)).Msg("identity KV lookup failed") - span.RecordError(err) - continue - } - if rec != nil { - span.SetStatus(otelcodes.Ok, "resolved via kv") - resolvedVia = "kv" - found = true - return &proto.GetCanonicalDeviceResponse{ - Found: true, - Record: rec.ToProto(), - MatchedKey: key.ToProto(), - Revision: revision, - }, nil - } - } - - // Fallback to database correlation when KV misses. + // Resolve via CNPG-backed database lookup. record, matchedKey, err := s.lookupIdentityFromDB(ctx, keys) if err != nil { span.RecordError(err) @@ -94,55 +60,16 @@ func (s *Server) GetCanonicalDevice(ctx context.Context, req *proto.GetCanonical return &proto.GetCanonicalDeviceResponse{Found: false}, nil } - hydrate := false - if ok, err := s.hydrateIdentityKV(ctx, namespace, matchedKey, record); err != nil { - // Hydration failure is logged but does not fail the lookup response. 
- s.logger.Warn().Err(err).Str("key", matchedKey.KeyPath(namespace)).Msg("failed to hydrate identity kv") - span.RecordError(err) - } else { - hydrate = ok - } - - span.SetStatus(otelcodes.Ok, "resolved via db fallback") + span.SetStatus(otelcodes.Ok, "resolved via db") resolvedVia = "db" found = true return &proto.GetCanonicalDeviceResponse{ Found: true, Record: record.ToProto(), MatchedKey: matchedKey.ToProto(), - Hydrated: hydrate, }, nil } -func (s *Server) lookupIdentityFromKV(ctx context.Context, namespace string, key identitymap.Key) (*identitymap.Record, uint64, error) { - if s.identityKVClient == nil { - return nil, 0, nil - } - resp, err := s.identityKVClient.Get(ctx, &proto.GetRequest{Key: key.KeyPath(namespace)}) - if err != nil { - return nil, 0, err - } - if !resp.GetFound() || len(resp.GetValue()) == 0 { - return nil, resp.GetRevision(), nil - } - rec, err := identitymap.UnmarshalRecord(resp.GetValue()) - if err != nil { - if errors.Is(err, identitymap.ErrCorruptRecord) { - if s.logger != nil { - s.logger.Warn().Err(err).Str("key", key.KeyPath(namespace)).Msg("Skipping corrupt canonical identity record from KV") - } - if _, delErr := s.identityKVClient.Delete(ctx, &proto.DeleteRequest{Key: key.KeyPath(namespace)}); delErr != nil { - if s.logger != nil { - s.logger.Warn().Err(delErr).Str("key", key.KeyPath(namespace)).Msg("Failed to delete corrupt canonical identity record from KV") - } - } - return nil, resp.GetRevision(), nil - } - return nil, 0, err - } - return rec, resp.GetRevision(), nil -} - func (s *Server) lookupIdentityFromDB(ctx context.Context, keys []identitymap.Key) (*identitymap.Record, identitymap.Key, error) { if s.DB == nil { return nil, identitymap.Key{}, errDBServiceUnavailable @@ -225,61 +152,6 @@ func (s *Server) lookupDeviceIDByQuery(ctx context.Context, query string) (strin return id, nil } -func (s *Server) hydrateIdentityKV(ctx context.Context, namespace string, key identitymap.Key, record *identitymap.Record) (bool, error) { - if s.identityKVClient == nil || record == nil { - return false, nil - } - - payload, err := identitymap.MarshalRecord(record) - if err != nil { - return false, err - } - - _, err = s.identityKVClient.PutIfAbsent(ctx, &proto.PutRequest{Key: key.KeyPath(namespace), Value: payload}) - if err == nil { - return true, nil - } - - //exhaustive:ignore - switch status.Code(err) { - case grpccodes.AlreadyExists: - resp, getErr := s.identityKVClient.Get(ctx, &proto.GetRequest{Key: key.KeyPath(namespace)}) - if getErr != nil { - return false, getErr - } - existing, unmarshalErr := identitymap.UnmarshalRecord(resp.GetValue()) - if unmarshalErr != nil { - if errors.Is(unmarshalErr, identitymap.ErrCorruptRecord) { - if s.logger != nil { - s.logger.Warn().Err(unmarshalErr).Str("key", key.KeyPath(namespace)).Msg("Overwriting corrupt canonical identity entry during hydration") - } - } else { - return false, unmarshalErr - } - } - if existing != nil && existing.MetadataHash == record.MetadataHash { - return false, nil - } - _, updErr := s.identityKVClient.Update(ctx, &proto.UpdateRequest{ - Key: key.KeyPath(namespace), - Value: payload, - Revision: resp.GetRevision(), - TtlSeconds: 0, - }) - if updErr != nil { - if status.Code(updErr) == grpccodes.Aborted { - return false, nil - } - return false, updErr - } - return true, nil - case grpccodes.Unimplemented: - return false, nil - default: - return false, err - } -} - func buildRecordFromUnifiedDevice(device *models.UnifiedDevice) *identitymap.Record { if device == nil { return nil @@ -452,3 
+324,13 @@ func netboxLookupQuery(id string) string { ORDER BY _tp_time DESC LIMIT 1`, esc, esc) } + +// partitionFromDeviceID extracts the partition prefix from a device ID. +// Device IDs have the format "partition:ip" or similar compound keys. +func partitionFromDeviceID(deviceID string) string { + parts := strings.Split(deviceID, ":") + if len(parts) >= 2 { + return parts[0] + } + return "default" +} diff --git a/pkg/core/identity_lookup_test.go b/pkg/core/identity_lookup_test.go index aff3ed54d..5bb5d8008 100644 --- a/pkg/core/identity_lookup_test.go +++ b/pkg/core/identity_lookup_test.go @@ -3,12 +3,10 @@ package core import ( "context" "testing" - "time" "github.com/stretchr/testify/require" nooptrace "go.opentelemetry.io/otel/trace/noop" "go.uber.org/mock/gomock" - "google.golang.org/grpc" "github.com/carverauto/serviceradar/pkg/db" "github.com/carverauto/serviceradar/pkg/identitymap" @@ -18,79 +16,69 @@ import ( identitymappb "github.com/carverauto/serviceradar/proto/identitymap/v1" ) -type fakeIdentityKV struct { - getFn func(ctx context.Context, in *proto.GetRequest) (*proto.GetResponse, error) - putIfAbsentFn func(ctx context.Context, in *proto.PutRequest) (*proto.PutResponse, error) - updateFn func(ctx context.Context, in *proto.UpdateRequest) (*proto.UpdateResponse, error) - putCalls int -} +func TestGetCanonicalDevice_FromDB(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockDB := db.NewMockService(ctrl) -func (f *fakeIdentityKV) Get(ctx context.Context, in *proto.GetRequest, _ ...grpc.CallOption) (*proto.GetResponse, error) { - if f.getFn != nil { - return f.getFn(ctx, in) + device := &models.UnifiedDevice{ + DeviceID: "tenant:device-1", + IP: "10.10.0.5", + Metadata: &models.DiscoveredField[map[string]string]{Value: map[string]string{"armis_device_id": "armis-1"}}, + DiscoverySources: []models.DiscoverySourceInfo{{Source: models.DiscoverySourceArmis}}, } - return &proto.GetResponse{}, nil -} -func (f *fakeIdentityKV) PutIfAbsent(ctx context.Context, in *proto.PutRequest, _ ...grpc.CallOption) (*proto.PutResponse, error) { - f.putCalls++ - if f.putIfAbsentFn != nil { - return f.putIfAbsentFn(ctx, in) + mockDB.EXPECT().GetUnifiedDevice(gomock.Any(), "tenant:device-1").Return(device, nil) + + server := &Server{ + DB: mockDB, + logger: logger.NewTestLogger(), + tracer: nooptrace.NewTracerProvider().Tracer("test"), } - return &proto.PutResponse{}, nil -} -func (f *fakeIdentityKV) Update(ctx context.Context, in *proto.UpdateRequest, _ ...grpc.CallOption) (*proto.UpdateResponse, error) { - if f.updateFn != nil { - return f.updateFn(ctx, in) + req := &proto.GetCanonicalDeviceRequest{ + IdentityKeys: []*identitymappb.IdentityKey{ + identitymap.Key{Kind: identitymap.KindDeviceID, Value: "tenant:device-1"}.ToProto(), + }, } - return &proto.UpdateResponse{}, nil -} -func (f *fakeIdentityKV) Delete(ctx context.Context, in *proto.DeleteRequest, _ ...grpc.CallOption) (*proto.DeleteResponse, error) { - return &proto.DeleteResponse{}, nil + resp, err := server.GetCanonicalDevice(context.Background(), req) + require.NoError(t, err) + require.True(t, resp.GetFound()) + require.Equal(t, "tenant:device-1", resp.GetRecord().GetCanonicalDeviceId()) } -func TestGetCanonicalDevice_FromKV(t *testing.T) { +func TestGetCanonicalDevice_NotFound(t *testing.T) { t.Parallel() - now := time.Now().UTC() - rec := &identitymap.Record{ - CanonicalDeviceID: "tenant:device-1", - Partition: "tenant", - MetadataHash: "deadbeef", - UpdatedAt: now, - } - payload, err 
:= identitymap.MarshalRecord(rec) - require.NoError(t, err) + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) - kv := &fakeIdentityKV{ - getFn: func(_ context.Context, _ *proto.GetRequest) (*proto.GetResponse, error) { - return &proto.GetResponse{Value: payload, Found: true, Revision: 42}, nil - }, - } + mockDB := db.NewMockService(ctrl) + + mockDB.EXPECT().GetUnifiedDevice(gomock.Any(), "nonexistent").Return(nil, nil) server := &Server{ - identityKVClient: kv, - logger: logger.NewTestLogger(), - tracer: nooptrace.NewTracerProvider().Tracer("test"), + DB: mockDB, + logger: logger.NewTestLogger(), + tracer: nooptrace.NewTracerProvider().Tracer("test"), } req := &proto.GetCanonicalDeviceRequest{ IdentityKeys: []*identitymappb.IdentityKey{ - identitymap.Key{Kind: identitymap.KindDeviceID, Value: "tenant:device-1"}.ToProto(), + identitymap.Key{Kind: identitymap.KindDeviceID, Value: "nonexistent"}.ToProto(), }, } resp, err := server.GetCanonicalDevice(context.Background(), req) require.NoError(t, err) - require.True(t, resp.GetFound()) - require.Equal(t, uint64(42), resp.GetRevision()) - require.Equal(t, "tenant:device-1", resp.GetRecord().GetCanonicalDeviceId()) - require.False(t, resp.GetHydrated()) + require.False(t, resp.GetFound()) } -func TestGetCanonicalDevice_FallbackHydratesKV(t *testing.T) { +func TestGetCanonicalDevice_ByIP(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) @@ -99,33 +87,47 @@ func TestGetCanonicalDevice_FallbackHydratesKV(t *testing.T) { mockDB := db.NewMockService(ctrl) device := &models.UnifiedDevice{ - DeviceID: "tenant:device-2", - IP: "10.10.0.5", - Metadata: &models.DiscoveredField[map[string]string]{Value: map[string]string{"armis_device_id": "armis-2"}}, - DiscoverySources: []models.DiscoverySourceInfo{{Source: models.DiscoverySourceArmis}}, + DeviceID: "sr:12345678-1234-1234-1234-123456789abc", + IP: "192.168.1.100", + Metadata: &models.DiscoveredField[map[string]string]{Value: map[string]string{}}, + DiscoverySources: []models.DiscoverySourceInfo{{Source: models.DiscoverySourceSweep}}, } - mockDB.EXPECT().GetUnifiedDevice(gomock.Any(), "tenant:device-2").Return(device, nil) - - kv := &fakeIdentityKV{} + mockDB.EXPECT().GetUnifiedDevicesByIPsOrIDs(gomock.Any(), []string{"192.168.1.100"}, nil).Return([]*models.UnifiedDevice{device}, nil) server := &Server{ - DB: mockDB, - identityKVClient: kv, - logger: logger.NewTestLogger(), - tracer: nooptrace.NewTracerProvider().Tracer("test"), + DB: mockDB, + logger: logger.NewTestLogger(), + tracer: nooptrace.NewTracerProvider().Tracer("test"), } req := &proto.GetCanonicalDeviceRequest{ IdentityKeys: []*identitymappb.IdentityKey{ - identitymap.Key{Kind: identitymap.KindDeviceID, Value: "tenant:device-2"}.ToProto(), + identitymap.Key{Kind: identitymap.KindIP, Value: "192.168.1.100"}.ToProto(), }, } resp, err := server.GetCanonicalDevice(context.Background(), req) require.NoError(t, err) require.True(t, resp.GetFound()) - require.True(t, resp.GetHydrated()) - require.Equal(t, "tenant:device-2", resp.GetRecord().GetCanonicalDeviceId()) - require.Equal(t, 1, kv.putCalls) + require.Equal(t, "sr:12345678-1234-1234-1234-123456789abc", resp.GetRecord().GetCanonicalDeviceId()) +} + +func TestGetCanonicalDevice_InvalidRequest(t *testing.T) { + t.Parallel() + + server := &Server{ + logger: logger.NewTestLogger(), + tracer: nooptrace.NewTracerProvider().Tracer("test"), + } + + // Test nil request + resp, err := server.GetCanonicalDevice(context.Background(), nil) + require.Error(t, err) + require.Nil(t, resp) + 
+ // Test empty keys + resp, err = server.GetCanonicalDevice(context.Background(), &proto.GetCanonicalDeviceRequest{}) + require.Error(t, err) + require.Nil(t, resp) } diff --git a/pkg/core/result_processor.go b/pkg/core/result_processor.go index 7807225a4..f26fab3b0 100644 --- a/pkg/core/result_processor.go +++ b/pkg/core/result_processor.go @@ -19,15 +19,12 @@ package core import ( "context" "encoding/json" - "errors" "fmt" "strconv" "strings" "time" - "github.com/carverauto/serviceradar/pkg/identitymap" "github.com/carverauto/serviceradar/pkg/models" - "github.com/carverauto/serviceradar/proto" ) const ( @@ -171,7 +168,6 @@ func addPortMetadata(metadata map[string]string, portResults []*models.PortResul } } -//nolint:gocyclo // evaluation needs to aggregate multiple heuristics per host result func (s *Server) lookupCanonicalSweepIdentities(ctx context.Context, hosts []models.HostResult) map[string]canonicalSnapshot { if len(hosts) == 0 { return nil @@ -214,20 +210,8 @@ func (s *Server) lookupCanonicalSweepIdentities(ctx context.Context, hosts []mod return result } - if kvHits, kvMisses := s.fetchCanonicalSnapshotsFromKV(ctx, cacheMisses); len(kvHits) > 0 { - for ip, snapshot := range kvHits { - result[ip] = snapshot - if s.canonicalCache != nil { - s.canonicalCache.store(ip, snapshot) - } - } - cacheMisses = kvMisses - } - - if len(cacheMisses) == 0 { - return result - } - + // KV is not used for identity resolution - CNPG is the authoritative source. + // Resolve via DeviceRegistry (in-memory) then CNPG. remaining := cacheMisses if s.DeviceRegistry != nil { @@ -265,7 +249,6 @@ func (s *Server) lookupCanonicalSweepIdentities(ctx context.Context, hosts []mod if s.canonicalCache != nil { s.canonicalCache.store(ip, snapshot) } - s.persistIdentityForSnapshot(ctx, snapshot, device) break } @@ -319,7 +302,6 @@ func (s *Server) lookupCanonicalSweepIdentities(ctx context.Context, hosts []mod if s.canonicalCache != nil { s.canonicalCache.store(ip, snapshot) } - s.persistIdentityForSnapshot(ctx, snapshot, device) } } @@ -422,130 +404,3 @@ func (s *Server) applyCanonicalSnapshotToSweep(update *models.DeviceUpdate, snap copyIfEmpty("canonical_metadata_hash") copyIfEmpty("canonical_hostname") } - -func (s *Server) fetchCanonicalSnapshotsFromKV(ctx context.Context, ips []string) (map[string]canonicalSnapshot, []string) { - if s.identityKVClient == nil || len(ips) == 0 { - return nil, ips - } - - namespace := identitymap.DefaultNamespace - hits := make(map[string]canonicalSnapshot, len(ips)) - misses := make([]string, 0, len(ips)) - seen := make(map[string]struct{}, len(ips)) - - for _, rawIP := range ips { - ip := strings.TrimSpace(rawIP) - if ip == "" { - continue - } - if _, ok := seen[ip]; ok { - continue - } - seen[ip] = struct{}{} - - key := identitymap.Key{Kind: identitymap.KindIP, Value: ip} - record := s.loadIdentityRecord(ctx, namespace, key) - if record == nil { - misses = append(misses, ip) - continue - } - - snapshot := snapshotFromIdentityRecord(record, ip) - if !snapshotHasStrongIdentity(snapshot) { - misses = append(misses, ip) - continue - } - - hits[ip] = snapshot - } - - return hits, misses -} - -func (s *Server) loadIdentityRecord(ctx context.Context, namespace string, key identitymap.Key) *identitymap.Record { - if s.identityKVClient == nil { - return nil - } - - for _, path := range key.KeyPathVariants(namespace) { - resp, err := s.identityKVClient.Get(ctx, &proto.GetRequest{Key: path}) - if err != nil { - if s.logger != nil { - s.logger.Debug().Err(err).Str("key", 
path).Msg("identity KV lookup failed") - } - continue - } - if !resp.GetFound() || len(resp.GetValue()) == 0 { - continue - } - record, err := identitymap.UnmarshalRecord(resp.GetValue()) - if err != nil { - if errors.Is(err, identitymap.ErrCorruptRecord) { - if s.logger != nil { - s.logger.Warn().Err(err).Str("key", path).Msg("Skipping corrupt canonical identity record during KV lookup") - } - continue - } - if s.logger != nil { - s.logger.Warn().Err(err).Str("key", path).Msg("Failed to decode canonical identity record") - } - continue - } - return record - } - - return nil -} - -func snapshotFromIdentityRecord(record *identitymap.Record, fallbackIP string) canonicalSnapshot { - if record == nil { - return canonicalSnapshot{} - } - - snapshot := canonicalSnapshot{ - DeviceID: strings.TrimSpace(record.CanonicalDeviceID), - IP: strings.TrimSpace(fallbackIP), - } - - if record.Attributes != nil { - if ip := strings.TrimSpace(record.Attributes["ip"]); ip != "" { - snapshot.IP = ip - } - if mac := strings.TrimSpace(record.Attributes["mac"]); mac != "" { - snapshot.MAC = mac - } - } - - return snapshot -} - -func (s *Server) persistIdentityForSnapshot(ctx context.Context, snapshot canonicalSnapshot, device *models.UnifiedDevice) { - if s.identityKVClient == nil || device == nil { - return - } - - record := buildRecordFromUnifiedDevice(device) - if record == nil { - return - } - - keys := []identitymap.Key{ - {Kind: identitymap.KindIP, Value: snapshot.IP}, - } - - if partition := strings.TrimSpace(record.Partition); partition != "" && snapshot.IP != "" { - keys = append(keys, identitymap.Key{ - Kind: identitymap.KindPartitionIP, - Value: fmt.Sprintf("%s:%s", partition, snapshot.IP), - }) - } - - for _, key := range keys { - if strings.TrimSpace(key.Value) == "" { - continue - } - if _, err := s.hydrateIdentityKV(ctx, identitymap.DefaultNamespace, key, record); err != nil && s.logger != nil { - s.logger.Debug().Err(err).Str("key", key.Value).Msg("Failed to hydrate identity map during sweep lookup") - } - } -} diff --git a/pkg/core/server.go b/pkg/core/server.go index 8145a2f00..8f0415892 100644 --- a/pkg/core/server.go +++ b/pkg/core/server.go @@ -131,19 +131,17 @@ func NewServer(ctx context.Context, config *models.CoreServiceConfig, spireClien metricsManager := metrics.NewManager(metricsConfig, database, log) - // Initialize KV client for features that still need it (edge onboarding, identity lookups) - // Note: The identity map PUBLISHER is disabled - we no longer write identity keys to KV - // because it caused massive write amplification (5-6 keys per device = 300 writes/sec). - // Identity RESOLUTION now uses CNPG (unified_devices table) instead. + // Initialize KV client for edge onboarding. + // Note: KV is NOT used for identity resolution - CNPG is the authoritative source. 
var ( - kvClient proto.KVServiceClient - identityKVCloser func() error + kvClient proto.KVServiceClient + kvClientCloser func() error ) if client, closer, err := cfgutil.NewKVServiceClientFromEnv(ctx, models.RoleCore); err != nil { log.Warn().Err(err).Msg("Failed to initialize KV client") } else if client != nil { kvClient = client - identityKVCloser = closer + kvClientCloser = closer } // Initialize the authoritative device registry with DIRE (Device Identity and Reconciliation Engine) @@ -239,8 +237,7 @@ func NewServer(ctx context.Context, config *models.CoreServiceConfig, spireClien pollerStatusUpdates: make(map[string]*models.PollerStatus), logger: log, tracer: otel.Tracer("serviceradar-core"), - identityKVClient: kvClient, - identityKVCloser: identityKVCloser, + kvClientCloser: kvClientCloser, canonicalCache: newCanonicalCache(10 * time.Minute), deviceSearchPlanner: deviceSearchPlanner, templateRegistry: templateregistry.New(log), @@ -483,11 +480,11 @@ func (s *Server) Stop(ctx context.Context) error { s.grpcServer.Stop(ctx) } - if s.identityKVCloser != nil { - if err := s.identityKVCloser(); err != nil { - s.logger.Warn().Err(err).Msg("Failed to close identity map KV client") + if s.kvClientCloser != nil { + if err := s.kvClientCloser(); err != nil { + s.logger.Warn().Err(err).Msg("Failed to close KV client") } - s.identityKVCloser = nil + s.kvClientCloser = nil } // MCP server support removed @@ -507,18 +504,6 @@ func (s *Server) Stop(ctx context.Context) error { return nil } -// IdentityKVClient exposes the KV client used for canonical identity operations. -func (s *Server) IdentityKVClient() identityKVClient { - if s == nil { - return nil - } - - s.mu.RLock() - defer s.mu.RUnlock() - - return s.identityKVClient -} - // GetMetricsManager returns the metrics collector instance. 
func (s *Server) GetMetricsManager() metrics.MetricCollector { return s.metrics diff --git a/pkg/core/types.go b/pkg/core/types.go index 2c9a7416d..1f2a1e56f 100644 --- a/pkg/core/types.go +++ b/pkg/core/types.go @@ -73,8 +73,7 @@ type Server struct { DeviceRegistry registry.Manager ServiceRegistry registry.ServiceManager deviceSearchPlanner *search.Planner - identityKVClient identityKVClient - identityKVCloser func() error + kvClientCloser func() error // closer for KV connection (used for edge onboarding) eventPublisher *natsutil.EventPublisher natsConn *nats.Conn discoveryService DiscoveryService diff --git a/pkg/db/BUILD.bazel b/pkg/db/BUILD.bazel index df34023f3..7b69b81bf 100644 --- a/pkg/db/BUILD.bazel +++ b/pkg/db/BUILD.bazel @@ -34,6 +34,7 @@ go_library( "mock_db.go", "netflow.go", "pollers.go", + "pgx_batch_helper.go", "sql_parser.go", "services.go", "sweep.go", @@ -67,11 +68,15 @@ go_test( "cnpg_sweep_test.go", "cnpg_observability_test.go", "json_helper_test.go", + "pgx_batch_behavior_test.go", + "pgx_batch_helper_test.go", "services_json_test.go", ], embed = [":db"], deps = [ "//pkg/models", + "@com_github_jackc_pgx_v5//:pgx", + "@com_github_jackc_pgx_v5//pgconn:pgconn", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", ], diff --git a/pkg/db/auth.go b/pkg/db/auth.go index 86a5bf00f..52d2cf651 100644 --- a/pkg/db/auth.go +++ b/pkg/db/auth.go @@ -157,8 +157,7 @@ func (db *DB) StoreBatchUsers(ctx context.Context, users []*models.User) error { ) } - br := db.pgPool.SendBatch(ctx, batch) - if err := br.Close(); err != nil { + if err := sendBatchExecAll(ctx, batch, db.conn().SendBatch, "users"); err != nil { return fmt.Errorf("failed to store batch users: %w", err) } diff --git a/pkg/db/cnpg/migrations/00000000000002_otel_metrics_unit_and_agg.up.sql b/pkg/db/cnpg/migrations/00000000000002_otel_metrics_unit_and_agg.up.sql new file mode 100644 index 000000000..75af9d428 --- /dev/null +++ b/pkg/db/cnpg/migrations/00000000000002_otel_metrics_unit_and_agg.up.sql @@ -0,0 +1,63 @@ +-- Migration: Add unit column to otel_metrics and create continuous aggregation for stats +-- This migration adds: +-- 1. A `unit` column to store the metric's unit of measurement (e.g., "ms", "bytes", "1") +-- 2. 
A continuous aggregation for hourly metrics stats to improve query performance + +-- Add unit column to otel_metrics +ALTER TABLE otel_metrics ADD COLUMN IF NOT EXISTS unit TEXT; + +-- Create index on unit for filtering +CREATE INDEX IF NOT EXISTS idx_otel_metrics_unit ON otel_metrics (unit); + +-- Create continuous aggregation for otel_metrics hourly stats +-- This provides pre-computed counts for total, errors, and slow spans +CREATE MATERIALIZED VIEW IF NOT EXISTS otel_metrics_hourly_stats +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 hour', timestamp) AS bucket, + service_name, + metric_type, + COUNT(*) AS total_count, + COUNT(*) FILTER (WHERE is_slow = true) AS slow_count, + COUNT(*) FILTER (WHERE + level IN ('error', 'ERROR', 'Error') OR + http_status_code LIKE '4%' OR + http_status_code LIKE '5%' OR + (grpc_status_code IS NOT NULL AND grpc_status_code <> '0' AND grpc_status_code <> '') + ) AS error_count, + COUNT(*) FILTER (WHERE http_status_code LIKE '4%') AS http_4xx_count, + COUNT(*) FILTER (WHERE http_status_code LIKE '5%') AS http_5xx_count, + COUNT(*) FILTER (WHERE grpc_status_code IS NOT NULL AND grpc_status_code <> '0' AND grpc_status_code <> '') AS grpc_error_count, + AVG(duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS avg_duration_ms, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS p95_duration_ms, + MAX(duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS max_duration_ms +FROM otel_metrics +GROUP BY bucket, service_name, metric_type +WITH NO DATA; + +-- Add refresh policy to update the continuous aggregation every 15 minutes +-- The end_offset must be >= bucket size (1 hour) for TimescaleDB +SELECT add_continuous_aggregate_policy('otel_metrics_hourly_stats', + start_offset => INTERVAL '3 hours', + end_offset => INTERVAL '1 hour', + schedule_interval => INTERVAL '15 minutes', + if_not_exists => TRUE +); + +-- Create index on the continuous aggregation for efficient time-range queries +CREATE INDEX IF NOT EXISTS idx_otel_metrics_hourly_stats_bucket +ON otel_metrics_hourly_stats (bucket DESC); + +CREATE INDEX IF NOT EXISTS idx_otel_metrics_hourly_stats_service_bucket +ON otel_metrics_hourly_stats (service_name, bucket DESC); + +-- Grant permissions to spire role if it exists +DO $$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'spire') THEN + GRANT SELECT ON TABLE otel_metrics_hourly_stats TO spire; + END IF; +END $$; + +-- Add comment explaining the unit column +COMMENT ON COLUMN otel_metrics.unit IS 'Unit of measurement for the metric value (e.g., "ms", "s", "bytes", "1" for counts)'; diff --git a/pkg/db/cnpg_identity_engine.go b/pkg/db/cnpg_identity_engine.go index df65139a4..9aa540e9b 100644 --- a/pkg/db/cnpg_identity_engine.go +++ b/pkg/db/cnpg_identity_engine.go @@ -45,7 +45,8 @@ LIMIT 1` SELECT identifier_value, device_id FROM device_identifiers WHERE identifier_type = $1 - AND identifier_value = ANY($2)` + AND identifier_value = ANY($2) + AND partition = $3` // SQL for upserting device identifiers (new schema with partition) upsertDeviceIdentifierNewSQL = ` @@ -88,14 +89,18 @@ func (db *DB) GetDeviceIDByIdentifier(ctx context.Context, identifierType, ident // BatchGetDeviceIDsByIdentifier looks up device IDs for multiple identifier values of the same type. // Returns a map of identifier_value -> device_id. 
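The batch lookup rewritten below now scopes matches to a partition, with an empty argument falling back to the default partition. A hedged usage sketch against the new signature; the identifier type and partition literals are illustrative assumptions:

```go
package dbexample

import (
	"context"

	"github.com/carverauto/serviceradar/pkg/db"
)

// resolveArmisIdentifiers sketches the partition-scoped batch lookup.
// The identifier type "armis_device_id" and partition "default" are illustrative
// assumptions; an empty partition string falls back to the package default.
func resolveArmisIdentifiers(ctx context.Context, database *db.DB, values []string) (map[string]string, error) {
	return database.BatchGetDeviceIDsByIdentifier(ctx, "armis_device_id", values, "default")
}
```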
-func (db *DB) BatchGetDeviceIDsByIdentifier(ctx context.Context, identifierType string, identifierValues []string) (map[string]string, error) { +func (db *DB) BatchGetDeviceIDsByIdentifier(ctx context.Context, identifierType string, identifierValues []string, partition string) (map[string]string, error) { result := make(map[string]string) if !db.useCNPGWrites() || identifierType == "" || len(identifierValues) == 0 { return result, nil } - rows, err := db.conn().Query(ctx, batchGetDeviceIDsByIdentifierSQL, identifierType, identifierValues) + if partition == "" { + partition = defaultPartitionValue + } + + rows, err := db.conn().Query(ctx, batchGetDeviceIDsByIdentifierSQL, identifierType, identifierValues, partition) if err != nil { return result, err } diff --git a/pkg/db/cnpg_observability.go b/pkg/db/cnpg_observability.go index 1a6132dbd..7b42cf0a8 100644 --- a/pkg/db/cnpg_observability.go +++ b/pkg/db/cnpg_observability.go @@ -17,19 +17,8 @@ const ( defaultTracesTable = "otel_traces" ) -// InsertOTELLogs persists OTEL log rows into the configured CNPG table. -func (db *DB) InsertOTELLogs(ctx context.Context, table string, rows []models.OTELLogRow) error { - if len(rows) == 0 { - return nil - } - - if !db.cnpgConfigured() { - return ErrCNPGUnavailable - } - - sanitized, canonical := sanitizeObservabilityTable(table, defaultLogsTable) - - query := fmt.Sprintf(`INSERT INTO %s ( +const ( + otelLogsInsertSQL = `INSERT INTO %s ( timestamp, trace_id, span_id, @@ -47,50 +36,9 @@ func (db *DB) InsertOTELLogs(ctx context.Context, table string, rows []models.OT $1,$2,$3,$4,$5, $6,$7,$8,$9,$10, $11,$12,$13 - ) ON CONFLICT DO NOTHING`, sanitized) - - batch := &pgx.Batch{} - now := time.Now().UTC() + ) ON CONFLICT DO NOTHING` - for i := range rows { - ts := rows[i].Timestamp - if ts.IsZero() { - ts = now - } - - batch.Queue(query, - ts.UTC(), - rows[i].TraceID, - rows[i].SpanID, - rows[i].SeverityText, - rows[i].SeverityNumber, - rows[i].Body, - rows[i].ServiceName, - rows[i].ServiceVersion, - rows[i].ServiceInstance, - rows[i].ScopeName, - rows[i].ScopeVersion, - rows[i].Attributes, - rows[i].ResourceAttributes, - ) - } - - return db.sendCNPG(ctx, batch, fmt.Sprintf("%s logs", canonical)) -} - -// InsertOTELMetrics persists OTEL metric rows into the configured CNPG table. 
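The metrics insert now carries the new `unit` value end to end: each `models.OTELMetricRow` supplies a `Unit` that the refactored inserter below queues as the final insert parameter. A short sketch of writing a row with a unit; only a few fields are set and the values are illustrative:

```go
package dbexample

import (
	"context"
	"time"

	"github.com/carverauto/serviceradar/pkg/db"
	"github.com/carverauto/serviceradar/pkg/models"
)

// insertMetricWithUnit sketches persisting one OTEL metric row carrying the new
// unit column added by the migration above. Field values are illustrative.
func insertMetricWithUnit(ctx context.Context, database *db.DB) error {
	rows := []models.OTELMetricRow{
		{
			Timestamp:   time.Now().UTC(),
			ServiceName: "serviceradar-core",
			SpanName:    "GetCanonicalDevice",
			Unit:        "ms", // persisted via the new unit column
		},
	}

	// "otel_metrics" is the default table; table names are sanitized before use.
	return database.InsertOTELMetrics(ctx, "otel_metrics", rows)
}
```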
-func (db *DB) InsertOTELMetrics(ctx context.Context, table string, rows []models.OTELMetricRow) error { - if len(rows) == 0 { - return nil - } - - if !db.cnpgConfigured() { - return ErrCNPGUnavailable - } - - sanitized, canonical := sanitizeObservabilityTable(table, defaultMetricsTable) - - query := fmt.Sprintf(`INSERT INTO %s ( + otelMetricsInsertSQL = `INSERT INTO %s ( timestamp, trace_id, span_id, @@ -108,61 +56,16 @@ func (db *DB) InsertOTELMetrics(ctx context.Context, table string, rows []models grpc_status_code, is_slow, component, - level + level, + unit ) VALUES ( $1,$2,$3,$4,$5, $6,$7,$8,$9,$10, $11,$12,$13,$14,$15, - $16,$17,$18 - ) ON CONFLICT DO NOTHING`, sanitized) - - batch := &pgx.Batch{} - now := time.Now().UTC() - - for i := range rows { - ts := rows[i].Timestamp - if ts.IsZero() { - ts = now - } - - batch.Queue(query, - ts.UTC(), - rows[i].TraceID, - rows[i].SpanID, - rows[i].ServiceName, - rows[i].SpanName, - rows[i].SpanKind, - rows[i].DurationMs, - rows[i].DurationSeconds, - rows[i].MetricType, - rows[i].HTTPMethod, - rows[i].HTTPRoute, - rows[i].HTTPStatusCode, - rows[i].GRPCService, - rows[i].GRPCMethod, - rows[i].GRPCStatusCode, - rows[i].IsSlow, - rows[i].Component, - rows[i].Level, - ) - } - - return db.sendCNPG(ctx, batch, fmt.Sprintf("%s metrics", canonical)) -} - -// InsertOTELTraces persists OTEL trace rows into the configured CNPG table. -func (db *DB) InsertOTELTraces(ctx context.Context, table string, rows []models.OTELTraceRow) error { - if len(rows) == 0 { - return nil - } - - if !db.cnpgConfigured() { - return ErrCNPGUnavailable - } - - sanitized, canonical := sanitizeObservabilityTable(table, defaultTracesTable) + $16,$17,$18,$19 + ) ON CONFLICT DO NOTHING` - query := fmt.Sprintf(`INSERT INTO %s ( + otelTracesInsertSQL = `INSERT INTO %s ( timestamp, trace_id, span_id, @@ -187,41 +90,195 @@ func (db *DB) InsertOTELTraces(ctx context.Context, table string, rows []models. 
$6,$7,$8,$9,$10, $11,$12,$13,$14,$15, $16,$17,$18,$19 - ) ON CONFLICT DO NOTHING`, sanitized) + ) ON CONFLICT DO NOTHING` +) + +type otelRowInserter interface { + RowCount() int + TimestampAt(rowIndex int) time.Time + QueueRow(batch *pgx.Batch, query string, rowIndex int, timestamp time.Time) +} + +type otelLogInserter struct { + rows []models.OTELLogRow +} + +func (inserter otelLogInserter) RowCount() int { return len(inserter.rows) } + +func (inserter otelLogInserter) TimestampAt(rowIndex int) time.Time { + return inserter.rows[rowIndex].Timestamp +} + +func (inserter otelLogInserter) QueueRow(batch *pgx.Batch, query string, rowIndex int, timestamp time.Time) { + row := inserter.rows[rowIndex] + batch.Queue(query, + timestamp, + row.TraceID, + row.SpanID, + row.SeverityText, + row.SeverityNumber, + row.Body, + row.ServiceName, + row.ServiceVersion, + row.ServiceInstance, + row.ScopeName, + row.ScopeVersion, + row.Attributes, + row.ResourceAttributes, + ) +} + +type otelMetricInserter struct { + rows []models.OTELMetricRow +} + +func (inserter otelMetricInserter) RowCount() int { return len(inserter.rows) } + +func (inserter otelMetricInserter) TimestampAt(rowIndex int) time.Time { + return inserter.rows[rowIndex].Timestamp +} + +func (inserter otelMetricInserter) QueueRow(batch *pgx.Batch, query string, rowIndex int, timestamp time.Time) { + row := inserter.rows[rowIndex] + batch.Queue(query, + timestamp, + row.TraceID, + row.SpanID, + row.ServiceName, + row.SpanName, + row.SpanKind, + row.DurationMs, + row.DurationSeconds, + row.MetricType, + row.HTTPMethod, + row.HTTPRoute, + row.HTTPStatusCode, + row.GRPCService, + row.GRPCMethod, + row.GRPCStatusCode, + row.IsSlow, + row.Component, + row.Level, + row.Unit, + ) +} + +type otelTraceInserter struct { + rows []models.OTELTraceRow +} + +func (inserter otelTraceInserter) RowCount() int { return len(inserter.rows) } + +func (inserter otelTraceInserter) TimestampAt(rowIndex int) time.Time { + return inserter.rows[rowIndex].Timestamp +} + +func (inserter otelTraceInserter) QueueRow(batch *pgx.Batch, query string, rowIndex int, timestamp time.Time) { + row := inserter.rows[rowIndex] + batch.Queue(query, + timestamp, + row.TraceID, + row.SpanID, + row.ParentSpanID, + row.Name, + row.Kind, + row.StartTimeUnixNano, + row.EndTimeUnixNano, + row.ServiceName, + row.ServiceVersion, + row.ServiceInstance, + row.ScopeName, + row.ScopeVersion, + row.StatusCode, + row.StatusMessage, + row.Attributes, + row.ResourceAttributes, + row.Events, + row.Links, + ) +} + +func buildOTELLogsInsertQuery(sanitizedTable string) string { + return fmt.Sprintf(otelLogsInsertSQL, sanitizedTable) +} + +func buildOTELMetricsInsertQuery(sanitizedTable string) string { + return fmt.Sprintf(otelMetricsInsertSQL, sanitizedTable) +} + +func buildOTELTracesInsertQuery(sanitizedTable string) string { + return fmt.Sprintf(otelTracesInsertSQL, sanitizedTable) +} + +func (db *DB) insertOTELRows( + ctx context.Context, + table string, + defaultTable string, + kind string, + rowCount int, + buildQuery func(sanitizedTable string) string, + timestampAt func(rowIndex int) time.Time, + queueRow func(batch *pgx.Batch, query string, rowIndex int, timestamp time.Time), +) error { + if rowCount == 0 { + return nil + } + + if !db.cnpgConfigured() { + return ErrCNPGUnavailable + } + + sanitizedTable, canonicalTable := sanitizeObservabilityTable(table, defaultTable) + query := buildQuery(sanitizedTable) batch := &pgx.Batch{} now := time.Now().UTC() - for i := range rows { - ts := 
rows[i].Timestamp + for rowIndex := 0; rowIndex < rowCount; rowIndex++ { + ts := timestampAt(rowIndex) if ts.IsZero() { ts = now } - batch.Queue(query, - ts.UTC(), - rows[i].TraceID, - rows[i].SpanID, - rows[i].ParentSpanID, - rows[i].Name, - rows[i].Kind, - rows[i].StartTimeUnixNano, - rows[i].EndTimeUnixNano, - rows[i].ServiceName, - rows[i].ServiceVersion, - rows[i].ServiceInstance, - rows[i].ScopeName, - rows[i].ScopeVersion, - rows[i].StatusCode, - rows[i].StatusMessage, - rows[i].Attributes, - rows[i].ResourceAttributes, - rows[i].Events, - rows[i].Links, - ) + queueRow(batch, query, rowIndex, ts.UTC()) } - return db.sendCNPG(ctx, batch, fmt.Sprintf("%s traces", canonical)) + return db.sendCNPG(ctx, batch, fmt.Sprintf("%s %s", canonicalTable, kind)) +} + +func (db *DB) insertOTEL( + ctx context.Context, + table string, + defaultTable string, + kind string, + buildQuery func(sanitizedTable string) string, + inserter otelRowInserter, +) error { + return db.insertOTELRows( + ctx, + table, + defaultTable, + kind, + inserter.RowCount(), + buildQuery, + inserter.TimestampAt, + inserter.QueueRow, + ) +} + +// InsertOTELLogs persists OTEL log rows into the configured CNPG table. +func (db *DB) InsertOTELLogs(ctx context.Context, table string, rows []models.OTELLogRow) error { + return db.insertOTEL(ctx, table, defaultLogsTable, "logs", buildOTELLogsInsertQuery, otelLogInserter{rows: rows}) +} + +// InsertOTELMetrics persists OTEL metric rows into the configured CNPG table. +func (db *DB) InsertOTELMetrics(ctx context.Context, table string, rows []models.OTELMetricRow) error { + return db.insertOTEL(ctx, table, defaultMetricsTable, "metrics", buildOTELMetricsInsertQuery, otelMetricInserter{rows: rows}) +} + +// InsertOTELTraces persists OTEL trace rows into the configured CNPG table. +func (db *DB) InsertOTELTraces(ctx context.Context, table string, rows []models.OTELTraceRow) error { + return db.insertOTEL(ctx, table, defaultTracesTable, "traces", buildOTELTracesInsertQuery, otelTraceInserter{rows: rows}) } func sanitizeObservabilityTable(tableName, defaultName string) (string, string) { diff --git a/pkg/db/cnpg_pool.go b/pkg/db/cnpg_pool.go index 992052955..079d8f9cd 100644 --- a/pkg/db/cnpg_pool.go +++ b/pkg/db/cnpg_pool.go @@ -18,12 +18,10 @@ package db import ( "context" - "crypto/tls" - "crypto/x509" "fmt" "net/url" - "os" "path/filepath" + "strings" "time" "github.com/jackc/pgx/v5/pgxpool" @@ -32,54 +30,134 @@ import ( "github.com/carverauto/serviceradar/pkg/models" ) -// NewCNPGPool dials the configured CNPG cluster and returns a pgx pool that can -// be used for Timescale-backed reads/writes. 
-func NewCNPGPool(ctx context.Context, cfg *models.CNPGDatabase, log logger.Logger) (*pgxpool.Pool, error) { +const ( + cnpgSSLModeDisable = "disable" + cnpgSSLModeVerifyFull = "verify-full" +) + +func resolveCNPGSSLMode(cfg *models.CNPGDatabase) (string, error) { if cfg == nil { - return nil, nil + return "", ErrCNPGConfigMissing } - cnpg := *cfg - if cnpg.Port == 0 { - cnpg.Port = 5432 + sslMode := strings.TrimSpace(cfg.SSLMode) + if sslMode == "" && cfg.ExtraRuntimeParams != nil { + sslMode = strings.TrimSpace(cfg.ExtraRuntimeParams["sslmode"]) + } + + if sslMode == "" { + if cfg.TLS != nil { + sslMode = cnpgSSLModeVerifyFull + } else { + sslMode = cnpgSSLModeDisable + } + } + + sslMode = strings.ToLower(sslMode) + if cfg.TLS != nil && sslMode == cnpgSSLModeDisable { + return "", ErrCNPGTLSDisabled + } + + return sslMode, nil +} + +func resolveCNPGTLSPath(cfg *models.CNPGDatabase, path string) string { + if cfg == nil || path == "" { + return path + } + + if filepath.IsAbs(path) || cfg.CertDir == "" { + return path + } + + return filepath.Join(cfg.CertDir, path) +} + +func buildCNPGConnURL(cfg *models.CNPGDatabase) (url.URL, error) { + if cfg == nil { + return url.URL{}, ErrCNPGConfigMissing } connURL := url.URL{ Scheme: "postgres", - Host: fmt.Sprintf("%s:%d", cnpg.Host, cnpg.Port), - Path: "/" + cnpg.Database, + Host: fmt.Sprintf("%s:%d", cfg.Host, cfg.Port), + Path: "/" + cfg.Database, } - if cnpg.Username != "" { - if cnpg.Password != "" { - connURL.User = url.UserPassword(cnpg.Username, cnpg.Password) + if cfg.Username != "" { + if cfg.Password != "" { + connURL.User = url.UserPassword(cfg.Username, cfg.Password) } else { - connURL.User = url.User(cnpg.Username) + connURL.User = url.User(cfg.Username) } } query := connURL.Query() - sslMode := cnpg.SSLMode - if sslMode == "" { - sslMode = "disable" + if cfg.ApplicationName != "" { + query.Set("application_name", cfg.ApplicationName) } - query.Set("sslmode", sslMode) - if cnpg.ApplicationName != "" { - query.Set("application_name", cnpg.ApplicationName) - } + for k, v := range cfg.ExtraRuntimeParams { + if k == "" { + continue + } - for k, v := range cnpg.ExtraRuntimeParams { + k = strings.TrimSpace(k) if k == "" { continue } - query.Set(k, v) + switch strings.ToLower(k) { + case "sslmode", "sslcert", "sslkey", "sslrootcert": + continue + default: + query.Set(k, v) + } + } + + sslMode, err := resolveCNPGSSLMode(cfg) + if err != nil { + return url.URL{}, err + } + query.Set("sslmode", sslMode) + + if cfg.TLS != nil { + certFile := resolveCNPGTLSPath(cfg, cfg.TLS.CertFile) + keyFile := resolveCNPGTLSPath(cfg, cfg.TLS.KeyFile) + caFile := resolveCNPGTLSPath(cfg, cfg.TLS.CAFile) + + if certFile == "" || keyFile == "" || caFile == "" { + return url.URL{}, ErrCNPGLackingTLSFiles + } + + query.Set("sslcert", certFile) + query.Set("sslkey", keyFile) + query.Set("sslrootcert", caFile) } connURL.RawQuery = query.Encode() + return connURL, nil +} + +// NewCNPGPool dials the configured CNPG cluster and returns a pgx pool that can +// be used for Timescale-backed reads/writes. 
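TLS material now flows through libpq-style URL parameters instead of a hand-built `tls.Config`: a configured TLS block without an explicit `SSLMode` dials with `sslmode=verify-full`, relative cert paths resolve under `CertDir`, and `sslmode=disable` combined with TLS is rejected (see the tests added later in this change). A hedged configuration sketch of that default path; host, credentials, and file names are illustrative:

```go
package dbexample

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/carverauto/serviceradar/pkg/db"
	"github.com/carverauto/serviceradar/pkg/logger"
	"github.com/carverauto/serviceradar/pkg/models"
)

// newVerifyFullPool sketches the mTLS path: with a TLS block and no explicit
// SSLMode, the connection URL is built with sslmode=verify-full and the
// sslcert/sslkey/sslrootcert paths resolved under CertDir.
func newVerifyFullPool(ctx context.Context, log logger.Logger) (*pgxpool.Pool, error) {
	cfg := &models.CNPGDatabase{
		Host:     "cnpg-rw",
		Port:     5432,
		Database: "serviceradar",
		Username: "serviceradar",
		Password: "example-password",
		CertDir:  "/etc/serviceradar/cnpg",
		TLS: &models.TLSConfig{
			CertFile: "client.crt",
			KeyFile:  "client.key",
			CAFile:   "ca.crt",
		},
	}

	return db.NewCNPGPool(ctx, cfg, log)
}
```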
+func NewCNPGPool(ctx context.Context, cfg *models.CNPGDatabase, log logger.Logger) (*pgxpool.Pool, error) { + if cfg == nil { + return nil, nil + } + + cnpg := *cfg + if cnpg.Port == 0 { + cnpg.Port = 5432 + } + + connURL, err := buildCNPGConnURL(&cnpg) + if err != nil { + return nil, err + } + poolConfig, err := pgxpool.ParseConfig(connURL.String()) if err != nil { return nil, fmt.Errorf("cnpg: failed to parse connection string: %w", err) @@ -106,10 +184,16 @@ func NewCNPGPool(ctx context.Context, cfg *models.CNPGDatabase, log logger.Logge } for k, v := range cnpg.ExtraRuntimeParams { + k = strings.TrimSpace(k) if k == "" { continue } + switch strings.ToLower(k) { + case "sslmode", "sslcert", "sslkey", "sslrootcert": + continue + } + poolConfig.ConnConfig.RuntimeParams[k] = v } @@ -118,12 +202,6 @@ func NewCNPGPool(ctx context.Context, cfg *models.CNPGDatabase, log logger.Logge poolConfig.ConnConfig.RuntimeParams["statement_timeout"] = fmt.Sprintf("%d", timeout) } - if tlsConfig, err := buildCNPGTLSConfig(&cnpg); err != nil { - return nil, err - } else if tlsConfig != nil { - poolConfig.ConnConfig.TLSConfig = tlsConfig - } - pool, err := pgxpool.NewWithConfig(ctx, poolConfig) if err != nil { return nil, fmt.Errorf("cnpg: failed to initialize pool: %w", err) @@ -147,51 +225,3 @@ func newCNPGPool(ctx context.Context, config *models.CoreServiceConfig, log logg return NewCNPGPool(ctx, config.CNPG, log) } - -func buildCNPGTLSConfig(cfg *models.CNPGDatabase) (*tls.Config, error) { - if cfg == nil || cfg.TLS == nil { - return nil, nil - } - - resolve := func(path string) string { - if path == "" { - return path - } - - if filepath.IsAbs(path) || cfg.CertDir == "" { - return path - } - - return filepath.Join(cfg.CertDir, path) - } - - certFile := resolve(cfg.TLS.CertFile) - keyFile := resolve(cfg.TLS.KeyFile) - caFile := resolve(cfg.TLS.CAFile) - - if certFile == "" || keyFile == "" || caFile == "" { - return nil, ErrCNPGLackingTLSFiles - } - - clientCert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, fmt.Errorf("cnpg tls: failed to load client keypair: %w", err) - } - - caBytes, err := os.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("cnpg tls: failed to read CA file: %w", err) - } - - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caBytes) { - return nil, ErrCNPGAppendCACert - } - - return &tls.Config{ - Certificates: []tls.Certificate{clientCert}, - RootCAs: caPool, - MinVersion: tls.VersionTLS12, - ServerName: cfg.Host, - }, nil -} diff --git a/pkg/db/cnpg_pool_test.go b/pkg/db/cnpg_pool_test.go new file mode 100644 index 000000000..eac125f96 --- /dev/null +++ b/pkg/db/cnpg_pool_test.go @@ -0,0 +1,112 @@ +package db + +import ( + "errors" + "testing" + + "github.com/carverauto/serviceradar/pkg/models" +) + +func TestBuildCNPGConnURL_DefaultsSSLModeDisableWithoutTLS(t *testing.T) { + t.Parallel() + + u, err := buildCNPGConnURL(&models.CNPGDatabase{ + Host: "cnpg-rw", + Port: 5432, + Database: "serviceradar", + }) + if err != nil { + t.Fatalf("buildCNPGConnURL error: %v", err) + } + + if got := u.Query().Get("sslmode"); got != "disable" { + t.Fatalf("sslmode=%q, want %q", got, "disable") + } +} + +func TestBuildCNPGConnURL_DefaultsSSLModeVerifyFullWithTLS(t *testing.T) { + t.Parallel() + + u, err := buildCNPGConnURL(&models.CNPGDatabase{ + Host: "cnpg-rw", + Port: 5432, + Database: "serviceradar", + TLS: &models.TLSConfig{ + CertFile: "client.crt", + KeyFile: "client.key", + CAFile: "ca.crt", + }, + }) + if err != nil { + 
t.Fatalf("buildCNPGConnURL error: %v", err) + } + + if got := u.Query().Get("sslmode"); got != "verify-full" { + t.Fatalf("sslmode=%q, want %q", got, "verify-full") + } +} + +func TestBuildCNPGConnURL_RejectsTLSWithSSLModeDisable(t *testing.T) { + t.Parallel() + + _, err := buildCNPGConnURL(&models.CNPGDatabase{ + Host: "cnpg-rw", + Port: 5432, + Database: "serviceradar", + SSLMode: "disable", + TLS: &models.TLSConfig{ + CertFile: "client.crt", + KeyFile: "client.key", + CAFile: "ca.crt", + }, + }) + if !errors.Is(err, ErrCNPGTLSDisabled) { + t.Fatalf("error=%v, want %v", err, ErrCNPGTLSDisabled) + } +} + +func TestBuildCNPGConnURL_TLSPathsResolveViaCertDir(t *testing.T) { + t.Parallel() + + u, err := buildCNPGConnURL(&models.CNPGDatabase{ + Host: "cnpg-rw", + Port: 5432, + Database: "serviceradar", + CertDir: "/etc/serviceradar/cnpg", + TLS: &models.TLSConfig{ + CertFile: "client.crt", + KeyFile: "client.key", + CAFile: "ca.crt", + }, + }) + if err != nil { + t.Fatalf("buildCNPGConnURL error: %v", err) + } + + q := u.Query() + if got := q.Get("sslcert"); got != "/etc/serviceradar/cnpg/client.crt" { + t.Fatalf("sslcert=%q, want %q", got, "/etc/serviceradar/cnpg/client.crt") + } + if got := q.Get("sslkey"); got != "/etc/serviceradar/cnpg/client.key" { + t.Fatalf("sslkey=%q, want %q", got, "/etc/serviceradar/cnpg/client.key") + } + if got := q.Get("sslrootcert"); got != "/etc/serviceradar/cnpg/ca.crt" { + t.Fatalf("sslrootcert=%q, want %q", got, "/etc/serviceradar/cnpg/ca.crt") + } +} + +func TestResolveCNPGSSLMode_UsesRuntimeParamsFallback(t *testing.T) { + t.Parallel() + + got, err := resolveCNPGSSLMode(&models.CNPGDatabase{ + ExtraRuntimeParams: map[string]string{ + "sslmode": "Verify-CA", + }, + }) + if err != nil { + t.Fatalf("resolveCNPGSSLMode error: %v", err) + } + if got != "verify-ca" { + t.Fatalf("sslmode=%q, want %q", got, "verify-ca") + } +} diff --git a/pkg/db/cnpg_registry.go b/pkg/db/cnpg_registry.go index 949167804..6bf2ea1fc 100644 --- a/pkg/db/cnpg_registry.go +++ b/pkg/db/cnpg_registry.go @@ -47,18 +47,8 @@ INSERT INTO pollers ( $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14 ) ON CONFLICT (poller_id) DO UPDATE SET - component_id = EXCLUDED.component_id, - registration_source = EXCLUDED.registration_source, - status = EXCLUDED.status, - spiffe_identity = EXCLUDED.spiffe_identity, - first_registered = EXCLUDED.first_registered, - first_seen = EXCLUDED.first_seen, last_seen = EXCLUDED.last_seen, - metadata = EXCLUDED.metadata, - created_by = EXCLUDED.created_by, is_healthy = EXCLUDED.is_healthy, - agent_count = EXCLUDED.agent_count, - checker_count = EXCLUDED.checker_count, updated_at = EXCLUDED.updated_at` insertPollerHistorySQL = ` diff --git a/pkg/db/cnpg_registry_test.go b/pkg/db/cnpg_registry_test.go index 40a9d58d2..284a36f41 100644 --- a/pkg/db/cnpg_registry_test.go +++ b/pkg/db/cnpg_registry_test.go @@ -2,6 +2,7 @@ package db import ( "encoding/json" + "strings" "testing" "time" @@ -11,6 +12,27 @@ import ( "github.com/carverauto/serviceradar/pkg/models" ) +func TestUpsertPollerStatusSQL_PreservesRegistrationMetadata(t *testing.T) { + normalized := strings.Join(strings.Fields(upsertPollerStatusSQL), " ") + + assert.Contains(t, normalized, "ON CONFLICT (poller_id) DO UPDATE SET") + + assert.Contains(t, normalized, "last_seen = EXCLUDED.last_seen") + assert.Contains(t, normalized, "is_healthy = EXCLUDED.is_healthy") + assert.Contains(t, normalized, "updated_at = EXCLUDED.updated_at") + + assert.NotContains(t, normalized, "component_id = EXCLUDED.component_id") + 
assert.NotContains(t, normalized, "registration_source = EXCLUDED.registration_source") + assert.NotContains(t, normalized, "status = EXCLUDED.status") + assert.NotContains(t, normalized, "spiffe_identity = EXCLUDED.spiffe_identity") + assert.NotContains(t, normalized, "first_seen =") + assert.NotContains(t, normalized, "metadata = EXCLUDED.metadata") + assert.NotContains(t, normalized, "created_by = EXCLUDED.created_by") + assert.NotContains(t, normalized, "agent_count = EXCLUDED.agent_count") + assert.NotContains(t, normalized, "checker_count = EXCLUDED.checker_count") + assert.NotContains(t, normalized, "first_registered = EXCLUDED.first_registered") +} + func TestBuildCNPGPollerStatusArgs(t *testing.T) { now := time.Date(2025, time.June, 10, 12, 0, 0, 0, time.UTC) status := &models.PollerStatus{ diff --git a/pkg/db/cnpg_unified_devices.go b/pkg/db/cnpg_unified_devices.go index d8dd21858..a06419eff 100644 --- a/pkg/db/cnpg_unified_devices.go +++ b/pkg/db/cnpg_unified_devices.go @@ -489,8 +489,7 @@ func (db *DB) DeleteDevices(ctx context.Context, deviceIDs []string) error { } // Execute audit log batch - br := db.conn().SendBatch(ctx, batch) - if err := br.Close(); err != nil { + if err := sendBatchExecAll(ctx, batch, db.conn().SendBatch, "device deletion audit"); err != nil { db.logger.Warn().Err(err).Msg("Failed to log device deletions to audit trail") // Continue with deletion even if audit fails } diff --git a/pkg/db/errors.go b/pkg/db/errors.go index 21674a8ad..b36554382 100644 --- a/pkg/db/errors.go +++ b/pkg/db/errors.go @@ -88,13 +88,18 @@ var ( ErrCNPGRowsNotInitialized = errors.New("cnpg rows not initialized") + // CNPG configuration helpers. + + ErrCNPGConfigMissing = errors.New("cnpg: missing configuration") + // TLS helpers. ErrCNPGLackingTLSFiles = errors.New("cnpg tls requires cert_file, key_file, and ca_file") ErrCNPGAppendCACert = errors.New("cnpg tls: unable to append CA certificate") + ErrCNPGTLSDisabled = errors.New("cnpg tls configuration requires sslmode not be disable") // Unified Device errors. errUnifiedDeviceNotFound = errors.New("unified device not found") errFailedToQueryUnifiedDevice = errors.New("failed to query unified device") errFailedToScanUnifiedDeviceRow = errors.New("failed to scan unified device row") -) \ No newline at end of file +) diff --git a/pkg/db/events.go b/pkg/db/events.go index 1190659c3..f4dfda0a0 100644 --- a/pkg/db/events.go +++ b/pkg/db/events.go @@ -80,8 +80,7 @@ func (db *DB) InsertEvents(ctx context.Context, rows []*models.EventRow) error { ) } - br := db.pgPool.SendBatch(ctx, batch) - if err := br.Close(); err != nil { + if err := sendBatchExecAll(ctx, batch, db.conn().SendBatch, "events"); err != nil { return fmt.Errorf("failed to insert events: %w", err) } diff --git a/pkg/db/interfaces.go b/pkg/db/interfaces.go index 9104cf038..48f1c3571 100644 --- a/pkg/db/interfaces.go +++ b/pkg/db/interfaces.go @@ -177,7 +177,7 @@ type Service interface { // Device identifier lookup operations (for IdentityEngine). GetDeviceIDByIdentifier(ctx context.Context, identifierType, identifierValue, partition string) (string, error) - BatchGetDeviceIDsByIdentifier(ctx context.Context, identifierType string, identifierValues []string) (map[string]string, error) + BatchGetDeviceIDsByIdentifier(ctx context.Context, identifierType string, identifierValues []string, partition string) (map[string]string, error) } // SysmonMetricsProvider interface defines operations for system monitoring metrics. 
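Reviewer note: a minimal sketch (not part of the patch; the function name `sketchResolveSSLModePrecedence` is invented) of how `resolveCNPGSSLMode` picks a mode — an explicit `SSLMode` wins, then the `sslmode` entry in `ExtraRuntimeParams`, then a default derived from whether TLS material is configured, with TLS plus an explicit `disable` rejected:

```go
package db

import (
	"errors"
	"fmt"

	"github.com/carverauto/serviceradar/pkg/models"
)

// sketchResolveSSLModePrecedence is illustrative only; it assumes it lives in
// package db so it can call the unexported resolveCNPGSSLMode helper.
func sketchResolveSSLModePrecedence() {
	tlsCfg := &models.TLSConfig{CertFile: "client.crt", KeyFile: "client.key", CAFile: "ca.crt"}

	// No explicit mode but TLS material present -> defaults to verify-full.
	mode, _ := resolveCNPGSSLMode(&models.CNPGDatabase{TLS: tlsCfg})
	fmt.Println(mode) // verify-full

	// Empty SSLMode falls back to ExtraRuntimeParams["sslmode"], normalized to lower case.
	mode, _ = resolveCNPGSSLMode(&models.CNPGDatabase{
		ExtraRuntimeParams: map[string]string{"sslmode": "Require"},
	})
	fmt.Println(mode) // require

	// Explicitly disabling SSL while TLS files are configured is rejected.
	_, err := resolveCNPGSSLMode(&models.CNPGDatabase{SSLMode: "disable", TLS: tlsCfg})
	fmt.Println(errors.Is(err, ErrCNPGTLSDisabled)) // true
}
```

`buildCNPGConnURL` then writes the resolved mode plus the `CertDir`-resolved `sslcert`/`sslkey`/`sslrootcert` paths into the connection URL and skips any `ssl*` entries in `ExtraRuntimeParams`, as the new `cnpg_pool_test.go` cases above exercise.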
diff --git a/pkg/db/mock_db.go b/pkg/db/mock_db.go index 72a89ff60..c78e21311 100644 --- a/pkg/db/mock_db.go +++ b/pkg/db/mock_db.go @@ -1254,6 +1254,7 @@ func (mr *MockServiceMockRecorder) WithTx(ctx, fn any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithTx", reflect.TypeOf((*MockService)(nil).WithTx), ctx, fn) } + // QueryRegistryRows mocks base method. func (m *MockService) QueryRegistryRows(ctx context.Context, query string, args ...interface{}) (Rows, error) { m.ctrl.T.Helper() @@ -1290,18 +1291,18 @@ func (mr *MockServiceMockRecorder) GetDeviceIDByIdentifier(ctx, identifierType, } // BatchGetDeviceIDsByIdentifier mocks base method. -func (m *MockService) BatchGetDeviceIDsByIdentifier(ctx context.Context, identifierType string, identifierValues []string) (map[string]string, error) { +func (m *MockService) BatchGetDeviceIDsByIdentifier(ctx context.Context, identifierType string, identifierValues []string, partition string) (map[string]string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchGetDeviceIDsByIdentifier", ctx, identifierType, identifierValues) + ret := m.ctrl.Call(m, "BatchGetDeviceIDsByIdentifier", ctx, identifierType, identifierValues, partition) ret0, _ := ret[0].(map[string]string) ret1, _ := ret[1].(error) return ret0, ret1 } // BatchGetDeviceIDsByIdentifier indicates an expected call of BatchGetDeviceIDsByIdentifier. -func (mr *MockServiceMockRecorder) BatchGetDeviceIDsByIdentifier(ctx, identifierType, identifierValues any) *gomock.Call { +func (mr *MockServiceMockRecorder) BatchGetDeviceIDsByIdentifier(ctx, identifierType, identifierValues, partition any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetDeviceIDsByIdentifier", reflect.TypeOf((*MockService)(nil).BatchGetDeviceIDsByIdentifier), ctx, identifierType, identifierValues) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetDeviceIDsByIdentifier", reflect.TypeOf((*MockService)(nil).BatchGetDeviceIDsByIdentifier), ctx, identifierType, identifierValues, partition) } // MockRows is a mock of Rows interface. @@ -1385,4 +1386,4 @@ func (m *MockRows) Err() error { func (mr *MockRowsMockRecorder) Err() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockRows)(nil).Err)) -} \ No newline at end of file +} diff --git a/pkg/db/pgx_batch_behavior_test.go b/pkg/db/pgx_batch_behavior_test.go new file mode 100644 index 000000000..6f7e3c080 --- /dev/null +++ b/pkg/db/pgx_batch_behavior_test.go @@ -0,0 +1,79 @@ +package db + +import ( + "context" + "errors" + "testing" + + "github.com/carverauto/serviceradar/pkg/models" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/stretchr/testify/require" +) + +// Static test errors for err113 compliance. 
+var ( + errFakeExecutorExecNotImplemented = errors.New("Exec not implemented in fakePgxExecutor") + errFakeExecutorQueryNotImplemented = errors.New("Query not implemented in fakePgxExecutor") + errInsertFailed = errors.New("insert failed") +) + +type fakePgxExecutor struct { + br *fakeBatchResults +} + +func (f *fakePgxExecutor) Exec(context.Context, string, ...any) (pgconn.CommandTag, error) { + return pgconn.CommandTag{}, errFakeExecutorExecNotImplemented +} + +func (f *fakePgxExecutor) Query(context.Context, string, ...any) (pgx.Rows, error) { + return nil, errFakeExecutorQueryNotImplemented +} + +func (f *fakePgxExecutor) QueryRow(context.Context, string, ...any) pgx.Row { + return fakeBatchRow{} +} + +func (f *fakePgxExecutor) SendBatch(context.Context, *pgx.Batch) pgx.BatchResults { + return f.br +} + +func TestInsertEvents_SurfacesBatchInsertErrors(t *testing.T) { + ctx := context.Background() + + br := &fakeBatchResults{ + execErrAt: 1, + execErr: errInsertFailed, + } + + db := &DB{executor: &fakePgxExecutor{br: br}} + + err := db.InsertEvents(ctx, []*models.EventRow{ + {ID: "a"}, + {ID: "b"}, + {ID: "c"}, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to insert events") + require.Contains(t, err.Error(), "events batch exec (command 1)") + require.Equal(t, 1, br.closeCalls) +} + +func TestStoreBatchUsers_SurfacesBatchInsertErrors(t *testing.T) { + ctx := context.Background() + + br := &fakeBatchResults{ + execErrAt: 0, + execErr: errInsertFailed, + } + + db := &DB{executor: &fakePgxExecutor{br: br}} + + err := db.StoreBatchUsers(ctx, []*models.User{ + {ID: "u1", Email: "u1@example.com", Name: "u1", Provider: "local", Roles: []string{"admin"}}, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to store batch users") + require.Contains(t, err.Error(), "users batch exec (command 0)") + require.Equal(t, 1, br.closeCalls) +} diff --git a/pkg/db/pgx_batch_helper.go b/pkg/db/pgx_batch_helper.go new file mode 100644 index 000000000..fe0e79700 --- /dev/null +++ b/pkg/db/pgx_batch_helper.go @@ -0,0 +1,29 @@ +package db + +import ( + "context" + "fmt" + + "github.com/jackc/pgx/v5" +) + +func sendBatchExecAll(ctx context.Context, batch *pgx.Batch, send func(context.Context, *pgx.Batch) pgx.BatchResults, operation string) (err error) { + if batch == nil || batch.Len() == 0 { + return nil + } + + br := send(ctx, batch) + defer func() { + if closeErr := br.Close(); closeErr != nil && err == nil { + err = fmt.Errorf("%s batch close: %w", operation, closeErr) + } + }() + + for i := 0; i < batch.Len(); i++ { + if _, err = br.Exec(); err != nil { + return fmt.Errorf("%s batch exec (command %d): %w", operation, i, err) + } + } + + return nil +} diff --git a/pkg/db/pgx_batch_helper_test.go b/pkg/db/pgx_batch_helper_test.go new file mode 100644 index 000000000..02bfc0b3c --- /dev/null +++ b/pkg/db/pgx_batch_helper_test.go @@ -0,0 +1,99 @@ +package db + +import ( + "context" + "errors" + "testing" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/stretchr/testify/require" +) + +// Static test errors for err113 compliance. 
+var ( + errFakeBatchResultsQuery = errors.New("Query not implemented in fakeBatchResults") + errFakeBatchRowScan = errors.New("Scan not implemented in fakeBatchRow") + errBoom = errors.New("boom") + errCloseFailed = errors.New("close failed") +) + +type fakeBatchResults struct { + execCalls int + execErrAt int + execErr error + + closeCalls int + closeErr error +} + +func (f *fakeBatchResults) Exec() (pgconn.CommandTag, error) { + defer func() { f.execCalls++ }() + if f.execErr != nil && f.execCalls == f.execErrAt { + return pgconn.CommandTag{}, f.execErr + } + return pgconn.NewCommandTag("INSERT 0 1"), nil +} + +func (f *fakeBatchResults) Query() (pgx.Rows, error) { + return nil, errFakeBatchResultsQuery +} + +type fakeBatchRow struct{} + +func (fakeBatchRow) Scan(...any) error { return errFakeBatchRowScan } + +func (f *fakeBatchResults) QueryRow() pgx.Row { + return fakeBatchRow{} +} + +func (f *fakeBatchResults) Close() error { + f.closeCalls++ + return f.closeErr +} + +func TestSendBatchExecAll_EmptyBatchDoesNotSend(t *testing.T) { + ctx := context.Background() + batch := &pgx.Batch{} + + err := sendBatchExecAll(ctx, batch, func(context.Context, *pgx.Batch) pgx.BatchResults { + t.Fatalf("SendBatch should not be called for empty batch") + return nil + }, "test") + require.NoError(t, err) +} + +func TestSendBatchExecAll_ExecErrorIncludesCommandIndexAndCloses(t *testing.T) { + ctx := context.Background() + batch := &pgx.Batch{} + batch.Queue("SELECT 1") + batch.Queue("SELECT 2") + batch.Queue("SELECT 3") + + br := &fakeBatchResults{ + execErrAt: 1, + execErr: errBoom, + } + + err := sendBatchExecAll(ctx, batch, func(context.Context, *pgx.Batch) pgx.BatchResults { + return br + }, "op-name") + require.Error(t, err) + require.Contains(t, err.Error(), "op-name batch exec (command 1)") + require.Equal(t, 1, br.closeCalls) +} + +func TestSendBatchExecAll_CloseErrorReturnedWhenExecSucceeds(t *testing.T) { + ctx := context.Background() + batch := &pgx.Batch{} + batch.Queue("SELECT 1") + + br := &fakeBatchResults{closeErr: errCloseFailed} + + err := sendBatchExecAll(ctx, batch, func(context.Context, *pgx.Batch) pgx.BatchResults { + return br + }, "op-name") + require.Error(t, err) + require.Contains(t, err.Error(), "op-name batch close: close failed") + require.Equal(t, 1, br.closeCalls) +} diff --git a/pkg/identitymap/identitymap.go b/pkg/identitymap/identitymap.go index 4570a919a..991436de1 100644 --- a/pkg/identitymap/identitymap.go +++ b/pkg/identitymap/identitymap.go @@ -93,30 +93,6 @@ func BuildKeys(update *models.DeviceUpdate) []Key { add(KindNetboxID, id) } } - - if aliasService := strings.TrimSpace(update.Metadata["_alias_last_seen_service_id"]); aliasService != "" { - add(KindDeviceID, aliasService) - } - if aliasIP := strings.TrimSpace(update.Metadata["_alias_last_seen_ip"]); aliasIP != "" { - add(KindIP, aliasIP) - add(KindPartitionIP, partitionIPValue(update.Partition, aliasIP)) - } - - for key := range update.Metadata { - switch { - case strings.HasPrefix(key, "service_alias:"): - aliasID := strings.TrimSpace(strings.TrimPrefix(key, "service_alias:")) - if aliasID != "" { - add(KindDeviceID, aliasID) - } - case strings.HasPrefix(key, "ip_alias:"): - aliasIP := strings.TrimSpace(strings.TrimPrefix(key, "ip_alias:")) - if aliasIP != "" { - add(KindIP, aliasIP) - add(KindPartitionIP, partitionIPValue(update.Partition, aliasIP)) - } - } - } } if update.MAC != nil { @@ -128,40 +104,6 @@ func BuildKeys(update *models.DeviceUpdate) []Key { return keys } -// BuildKeysFromRecord 
reconstructs the identity keys for a canonical record. -func BuildKeysFromRecord(record *Record) []Key { - if record == nil { - return nil - } - - update := &models.DeviceUpdate{ - DeviceID: record.CanonicalDeviceID, - Partition: record.Partition, - } - - if record.Attributes != nil { - if ip := strings.TrimSpace(record.Attributes["ip"]); ip != "" { - update.IP = ip - } - if mac := strings.TrimSpace(record.Attributes["mac"]); mac != "" { - macUpper := strings.ToUpper(mac) - update.MAC = &macUpper - } - - metaKeys := []string{"armis_device_id", "integration_id", "integration_type", "netbox_device_id"} - for _, key := range metaKeys { - if val := strings.TrimSpace(record.Attributes[key]); val != "" { - if update.Metadata == nil { - update.Metadata = make(map[string]string) - } - update.Metadata[key] = val - } - } - } - - return BuildKeys(update) -} - // HashIdentityMetadata produces a stable fingerprint of identity-relevant fields for a device update. func HashIdentityMetadata(update *models.DeviceUpdate) string { if update == nil { diff --git a/pkg/identitymap/identitymap_test.go b/pkg/identitymap/identitymap_test.go index 9545136b2..d7e58bc4b 100644 --- a/pkg/identitymap/identitymap_test.go +++ b/pkg/identitymap/identitymap_test.go @@ -61,68 +61,6 @@ func TestBuildKeysIncludesIPWhenDistinct(t *testing.T) { assert.Contains(t, keys, Key{Kind: KindPartitionIP, Value: "tenant-a:10.0.0.5"}) } -func TestBuildKeysIncludesAliasMetadata(t *testing.T) { - mac := testMACAddress - update := &models.DeviceUpdate{ - DeviceID: "tenant-a:host-device", - IP: "10.0.0.5", - Partition: "tenant-a", - MAC: &mac, - Metadata: map[string]string{ - "_alias_last_seen_service_id": "serviceradar:agent:k8s-agent", - "_alias_last_seen_ip": "10.0.0.5", - "service_alias:serviceradar:poller:k8s": "2025-11-03T15:00:00Z", - "ip_alias:10.0.0.8": "2025-11-03T15:00:00Z", - "armis_device_id": "armis-123", - "integration_type": "netbox", - "integration_id": "nb-42", - "netbox_device_id": "123", - }, - } - - keys := BuildKeys(update) - - assert.Contains(t, keys, Key{Kind: KindDeviceID, Value: "serviceradar:agent:k8s-agent"}) - assert.Contains(t, keys, Key{Kind: KindDeviceID, Value: "serviceradar:poller:k8s"}) - assert.Contains(t, keys, Key{Kind: KindIP, Value: "10.0.0.8"}) - assert.Contains(t, keys, Key{Kind: KindPartitionIP, Value: "tenant-a:10.0.0.8"}) -} - -func TestBuildKeysFromRecord(t *testing.T) { - mac := "aa:bb:cc:dd:ee:ff" - update := &models.DeviceUpdate{ - DeviceID: "tenant-a:device-42", - IP: "10.1.2.3", - Partition: "tenant-a", - MAC: &mac, - Metadata: map[string]string{ - "armis_device_id": "armis-42", - "integration_id": "nb-42", - "integration_type": "netbox", - "netbox_device_id": "device-42", - }, - } - - record := &Record{ - CanonicalDeviceID: update.DeviceID, - Partition: update.Partition, - MetadataHash: HashIdentityMetadata(update), - Attributes: map[string]string{ - "ip": update.IP, - "mac": "AA:BB:CC:DD:EE:FF", - "armis_device_id": update.Metadata["armis_device_id"], - "integration_id": update.Metadata["integration_id"], - "integration_type": update.Metadata["integration_type"], - "netbox_device_id": update.Metadata["netbox_device_id"], - }, - } - - keysFromRecord := BuildKeysFromRecord(record) - keysFromUpdate := BuildKeys(update) - - assert.ElementsMatch(t, keysFromUpdate, keysFromRecord) -} - func TestMarshalRoundtrip(t *testing.T) { rec := &Record{ CanonicalDeviceID: "tenant-a:canonical", diff --git a/pkg/mcp/BUILD.bazel b/pkg/mcp/BUILD.bazel index 0f519b6b2..59117c34c 100644 --- a/pkg/mcp/BUILD.bazel 
+++ b/pkg/mcp/BUILD.bazel @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "mcp", srcs = [ + "binds.go", "builder.go", "errors.go", "query_utils.go", @@ -27,6 +28,7 @@ go_library( go_test( name = "mcp_test", srcs = [ + "parameterized_queries_test.go", "server_test.go", "tools_graph_test.go", ], diff --git a/pkg/mcp/binds.go b/pkg/mcp/binds.go new file mode 100644 index 000000000..e38a130d7 --- /dev/null +++ b/pkg/mcp/binds.go @@ -0,0 +1,12 @@ +package mcp + +import "fmt" + +type srqlBindBuilder struct { + params []any +} + +func (b *srqlBindBuilder) Bind(value any) string { + b.params = append(b.params, value) + return fmt.Sprintf("$%d", len(b.params)) +} diff --git a/pkg/mcp/builder.go b/pkg/mcp/builder.go index 0439365f0..1f150e669 100644 --- a/pkg/mcp/builder.go +++ b/pkg/mcp/builder.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "sort" "strings" "time" ) @@ -138,11 +139,11 @@ func BuildFilteredQuery(entity string, params FilterQueryParams, additionalFilte } // FilterHandlerFunc represents a function that builds additional filters for a specific entity type -type FilterHandlerFunc func(args json.RawMessage) ([]string, map[string]interface{}, error) +type FilterHandlerFunc func(args json.RawMessage, binds *srqlBindBuilder) ([]string, map[string]interface{}, error) // FilterBuilder defines an interface for building entity-specific filters type FilterBuilder interface { - BuildFilters(args json.RawMessage) ([]string, map[string]interface{}, error) + BuildFilters(args json.RawMessage, binds *srqlBindBuilder) ([]string, map[string]interface{}, error) } // GenericFilterBuilder implements FilterBuilder with configurable field mappings @@ -152,7 +153,7 @@ type GenericFilterBuilder struct { } // BuildFilters builds filters for a generic entity using field mappings -func (g *GenericFilterBuilder) BuildFilters(args json.RawMessage) ( +func (g *GenericFilterBuilder) BuildFilters(args json.RawMessage, binds *srqlBindBuilder) ( additionalFilters []string, responseFilters map[string]interface{}, err error) { var rawArgs map[string]interface{} @@ -161,12 +162,22 @@ func (g *GenericFilterBuilder) BuildFilters(args json.RawMessage) ( } responseFilters = make(map[string]interface{}) + if binds == nil { + binds = &srqlBindBuilder{} + } // Process field mappings - for jsonField, sqlField := range g.FieldMappings { + jsonFields := make([]string, 0, len(g.FieldMappings)) + for jsonField := range g.FieldMappings { + jsonFields = append(jsonFields, jsonField) + } + sort.Strings(jsonFields) + + for _, jsonField := range jsonFields { + sqlField := g.FieldMappings[jsonField] if value, exists := rawArgs[jsonField]; exists && value != nil { if strValue, ok := value.(string); ok && strValue != "" { - additionalFilters = append(additionalFilters, fmt.Sprintf("%s = '%s'", sqlField, strValue)) + additionalFilters = append(additionalFilters, fmt.Sprintf("%s = %s", sqlField, binds.Bind(strValue))) } } } @@ -187,6 +198,8 @@ func (m *MCPServer) BuildGenericFilterTool(name, description, entity, resultsKey Name: name, Description: description, Handler: func(ctx context.Context, args json.RawMessage) (interface{}, error) { + binds := &srqlBindBuilder{} + // Extract common filter parameters var commonParams FilterQueryParams if err := json.Unmarshal(args, &commonParams); err != nil { @@ -194,7 +207,7 @@ func (m *MCPServer) BuildGenericFilterTool(name, description, entity, resultsKey } // Get entity-specific filters and response filters - additionalFilters, 
responseFilters, err := filterHandler(args) + additionalFilters, responseFilters, err := filterHandler(args, binds) if err != nil { return nil, err } @@ -205,7 +218,7 @@ func (m *MCPServer) BuildGenericFilterTool(name, description, entity, resultsKey m.logger.Debug().Str("query", query).Msgf("Executing %s query", entity) // Execute SRQL query via API - results, err := m.executeSRQLQuery(ctx, query, commonParams.Limit) + results, err := m.executeSRQLQueryWithParams(ctx, query, binds.params, commonParams.Limit) if err != nil { return nil, fmt.Errorf("failed to execute %s query: %w", entity, err) } diff --git a/pkg/mcp/parameterized_queries_test.go b/pkg/mcp/parameterized_queries_test.go new file mode 100644 index 000000000..0dfabc42d --- /dev/null +++ b/pkg/mcp/parameterized_queries_test.go @@ -0,0 +1,123 @@ +package mcp + +import ( + "context" + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/carverauto/serviceradar/pkg/logger" +) + +type recordingParameterizedExecutor struct { + lastQuery string + lastParams []any + lastLimit int + calledPlain int + calledWithParam int + + results []map[string]interface{} +} + +func (r *recordingParameterizedExecutor) ExecuteSRQLQuery(_ context.Context, query string, limit int) ([]map[string]interface{}, error) { + r.calledPlain++ + r.lastQuery = query + r.lastLimit = limit + r.lastParams = nil + return r.results, nil +} + +func (r *recordingParameterizedExecutor) ExecuteSRQLQueryWithParams(_ context.Context, query string, params []any, limit int) ([]map[string]interface{}, error) { + r.calledWithParam++ + r.lastQuery = query + r.lastParams = append([]any(nil), params...) + r.lastLimit = limit + return r.results, nil +} + +var _ ParameterizedQueryExecutor = (*recordingParameterizedExecutor)(nil) + +func TestDevicesGetDeviceBindsDeviceID(t *testing.T) { + exec := &recordingParameterizedExecutor{ + results: []map[string]interface{}{ + {"device_id": "ok"}, + }, + } + server := NewMCPServer(context.Background(), exec, logger.NewTestLogger(), &MCPConfig{Enabled: true}, nil) + tool := server.tools["devices.getDevice"] + + payload := map[string]any{ + "device_id": "device' OR '1'='1", + } + raw, err := json.Marshal(payload) + require.NoError(t, err) + + _, err = tool.Handler(context.Background(), raw) + require.NoError(t, err) + + require.Equal(t, 0, exec.calledPlain) + require.Equal(t, 1, exec.calledWithParam) + require.Equal(t, "SHOW devices WHERE device_id = $1 LIMIT 1", exec.lastQuery) + require.Equal(t, []any{payload["device_id"]}, exec.lastParams) +} + +func TestLogsGetRecentLogsBindsPollerID(t *testing.T) { + exec := &recordingParameterizedExecutor{ + results: []map[string]interface{}{{"log": "ok"}}, + } + server := NewMCPServer(context.Background(), exec, logger.NewTestLogger(), &MCPConfig{Enabled: true}, nil) + tool := server.tools["logs.getRecentLogs"] + + raw, err := json.Marshal(map[string]any{ + "poller_id": "poller' OR '1'='1", + "limit": 5, + }) + require.NoError(t, err) + + _, err = tool.Handler(context.Background(), raw) + require.NoError(t, err) + + require.Equal(t, 0, exec.calledPlain) + require.Equal(t, 1, exec.calledWithParam) + require.Equal(t, "SHOW logs WHERE poller_id = $1 ORDER BY timestamp DESC LIMIT 5", exec.lastQuery) + require.Equal(t, []any{"poller' OR '1'='1"}, exec.lastParams) +} + +func TestEventsGetEventsBindsMappedFilters(t *testing.T) { + exec := &recordingParameterizedExecutor{ + results: []map[string]interface{}{{"event_type": "ok"}}, + } + server := NewMCPServer(context.Background(), 
exec, logger.NewTestLogger(), &MCPConfig{Enabled: true}, nil) + tool := server.tools["events.getEvents"] + + raw, err := json.Marshal(map[string]any{ + "event_type": "network_down' OR '1'='1", + "severity": "critical", + "limit": 10, + }) + require.NoError(t, err) + + _, err = tool.Handler(context.Background(), raw) + require.NoError(t, err) + + require.Equal(t, 0, exec.calledPlain) + require.Equal(t, 1, exec.calledWithParam) + require.Equal(t, "SHOW events WHERE (event_type = $1) AND (severity = $2) ORDER BY _tp_time DESC LIMIT 10", exec.lastQuery) + require.Equal(t, []any{"network_down' OR '1'='1", "critical"}, exec.lastParams) +} + +func TestStructuredToolsRequireParameterizedExecutor(t *testing.T) { + exec := &recordingExecutor{} + server := NewMCPServer(context.Background(), exec, logger.NewTestLogger(), &MCPConfig{Enabled: true}, nil) + tool := server.tools["devices.getDevice"] + + raw, err := json.Marshal(map[string]any{ + "device_id": "device-123", + }) + require.NoError(t, err) + + _, err = tool.Handler(context.Background(), raw) + require.Error(t, err) + require.Contains(t, err.Error(), "parameterized SRQL") +} diff --git a/pkg/mcp/query_utils.go b/pkg/mcp/query_utils.go index cc26a6389..5a9670be3 100644 --- a/pkg/mcp/query_utils.go +++ b/pkg/mcp/query_utils.go @@ -3,6 +3,7 @@ package mcp import ( "context" "encoding/json" + "errors" "fmt" ) @@ -13,6 +14,8 @@ const ( defaultLimit = 100 ) +var errParameterizedSRQLNotSupported = errors.New("query executor does not support parameterized SRQL queries") + // getEntityTimestampField returns the appropriate timestamp field name for each entity type func getEntityTimestampField(entity string) string { switch entity { @@ -57,20 +60,41 @@ type QueryExecutor interface { ExecuteSRQLQuery(ctx context.Context, query string, limit int) ([]map[string]interface{}, error) } -func buildLogQuery(params LogQueryParams) string { +// ParameterizedQueryExecutor extends QueryExecutor with parameter binding support. +// Implementations MUST treat params as bound values (not text concatenated into query). 
+type ParameterizedQueryExecutor interface { + QueryExecutor + ExecuteSRQLQueryWithParams(ctx context.Context, query string, params []any, limit int) ([]map[string]interface{}, error) +} + +func executeSRQL(ctx context.Context, executor QueryExecutor, query string, params []any, limit int) ([]map[string]interface{}, error) { + if len(params) == 0 { + return executor.ExecuteSRQLQuery(ctx, query, limit) + } + + parameterized, ok := executor.(ParameterizedQueryExecutor) + if !ok { + return nil, fmt.Errorf("%w", errParameterizedSRQLNotSupported) + } + + return parameterized.ExecuteSRQLQueryWithParams(ctx, query, params, limit) +} + +func buildLogQuery(params LogQueryParams) (string, []any) { query := showLogsQuery conditions := []string{} + binds := &srqlBindBuilder{} if params.Filter != "" { conditions = append(conditions, params.Filter) } if params.StartTime != "" { - conditions = append(conditions, fmt.Sprintf("_tp_time >= '%s'", params.StartTime)) + conditions = append(conditions, fmt.Sprintf("_tp_time >= %s", binds.Bind(params.StartTime))) } if params.EndTime != "" { - conditions = append(conditions, fmt.Sprintf("_tp_time <= '%s'", params.EndTime)) + conditions = append(conditions, fmt.Sprintf("_tp_time <= %s", binds.Bind(params.EndTime))) } if len(conditions) > 0 { @@ -88,14 +112,15 @@ func buildLogQuery(params LogQueryParams) string { query += fmt.Sprintf(" LIMIT %d", params.Limit) - return query + return query, binds.params } -func buildRecentLogsQuery(params RecentLogsParams) string { +func buildRecentLogsQuery(params RecentLogsParams) (string, []any) { query := showLogsQuery + binds := &srqlBindBuilder{} if params.PollerID != "" { - query += fmt.Sprintf(" WHERE poller_id = '%s'", params.PollerID) + query += fmt.Sprintf(" WHERE poller_id = %s", binds.Bind(params.PollerID)) } if params.Limit <= 0 { @@ -104,7 +129,7 @@ func buildRecentLogsQuery(params RecentLogsParams) string { query += fmt.Sprintf(" ORDER BY _tp_time DESC LIMIT %d", params.Limit) - return query + return query, binds.params } func executeQueryLogs(ctx context.Context, args json.RawMessage, executor QueryExecutor) ([]map[string]interface{}, error) { @@ -116,9 +141,8 @@ func executeQueryLogs(ctx context.Context, args json.RawMessage, executor QueryE } } - query := buildLogQuery(params) - - return executor.ExecuteSRQLQuery(ctx, query, params.Limit) + query, binds := buildLogQuery(params) + return executeSRQL(ctx, executor, query, binds, params.Limit) } func executeGetRecentLogs(ctx context.Context, args json.RawMessage, executor QueryExecutor) ([]map[string]interface{}, error) { @@ -134,18 +158,18 @@ func executeGetRecentLogs(ctx context.Context, args json.RawMessage, executor Qu params.Limit = defaultLimit } - query := buildRecentLogsQuery(params) - - return executor.ExecuteSRQLQuery(ctx, query, params.Limit) + query, binds := buildRecentLogsQuery(params) + return executeSRQL(ctx, executor, query, binds, params.Limit) } -func buildDevicesQuery(params ListDevicesParams) string { +func buildDevicesQuery(params ListDevicesParams) (string, []any) { query := showDevicesQuery + binds := &srqlBindBuilder{} var conditions []string if params.Type != "" { - conditions = append(conditions, fmt.Sprintf("device_type = '%s'", params.Type)) + conditions = append(conditions, fmt.Sprintf("device_type = %s", binds.Bind(params.Type))) } if params.Status != "" { @@ -163,7 +187,7 @@ func buildDevicesQuery(params ListDevicesParams) string { condition = fmt.Sprintf("is_available = %s", params.Status) } else { // Fallback: assume it's a custom 
status field - condition = fmt.Sprintf("status = '%s'", params.Status) + condition = fmt.Sprintf("status = %s", binds.Bind(params.Status)) } } @@ -183,7 +207,7 @@ func buildDevicesQuery(params ListDevicesParams) string { query += fmt.Sprintf(" LIMIT %d", params.Limit) - return query + return query, binds.params } func executeListDevices(ctx context.Context, args json.RawMessage, executor QueryExecutor) ([]map[string]interface{}, error) { @@ -195,7 +219,6 @@ func executeListDevices(ctx context.Context, args json.RawMessage, executor Quer } } - query := buildDevicesQuery(params) - - return executor.ExecuteSRQLQuery(ctx, query, params.Limit) + query, binds := buildDevicesQuery(params) + return executeSRQL(ctx, executor, query, binds, params.Limit) } diff --git a/pkg/mcp/server.go b/pkg/mcp/server.go index d9f6bf0a3..314bbb2db 100644 --- a/pkg/mcp/server.go +++ b/pkg/mcp/server.go @@ -702,9 +702,8 @@ func (m *MCPServer) executeGetDevice(ctx context.Context, args json.RawMessage) return nil, errDeviceIDRequired } - query := fmt.Sprintf("SHOW devices WHERE device_id = '%s' LIMIT 1", params.DeviceID) - - return m.queryExecutor.ExecuteSRQLQuery(ctx, query, 1) + query := "SHOW devices WHERE device_id = $1 LIMIT 1" + return m.executeSRQLQueryWithParams(ctx, query, []any{params.DeviceID}, 1) } func (m *MCPServer) executeQueryEvents(ctx context.Context, args json.RawMessage) (interface{}, error) { @@ -726,13 +725,14 @@ func (m *MCPServer) executeQueryEvents(ctx context.Context, args json.RawMessage query := showEventsQuery conditions := []string{} timestampField := getEntityTimestampField("events") + binds := &srqlBindBuilder{} if params.StartTime != "" { - conditions = append(conditions, fmt.Sprintf("%s >= '%s'", timestampField, params.StartTime)) + conditions = append(conditions, fmt.Sprintf("%s >= %s", timestampField, binds.Bind(params.StartTime))) } if params.EndTime != "" { - conditions = append(conditions, fmt.Sprintf("%s <= '%s'", timestampField, params.EndTime)) + conditions = append(conditions, fmt.Sprintf("%s <= %s", timestampField, binds.Bind(params.EndTime))) } if len(conditions) > 0 { @@ -748,9 +748,11 @@ func (m *MCPServer) executeQueryEvents(ctx context.Context, args json.RawMessage query += fmt.Sprintf(" ORDER BY %s DESC LIMIT %d", timestampField, params.Limit) params.Query = query + + return m.executeSRQLQueryWithParams(ctx, params.Query, binds.params, params.Limit) } - return m.queryExecutor.ExecuteSRQLQuery(ctx, params.Query, params.Limit) + return m.executeSRQLQuery(ctx, params.Query, params.Limit) } func (m *MCPServer) executeExecuteSRQL(ctx context.Context, args json.RawMessage) (interface{}, error) { @@ -799,10 +801,25 @@ func (m *MCPServer) executeGetRecentLogs(ctx context.Context, args json.RawMessa // executeSRQLQuery executes an SRQL query directly via the query executor // It handles transformations for unsupported entity types like sweep_results func (m *MCPServer) executeSRQLQuery(ctx context.Context, query string, limit int) ([]map[string]interface{}, error) { + return m.executeSRQLQueryWithParams(ctx, query, nil, limit) +} + +// executeSRQLQueryWithParams executes an SRQL query with bound parameters via a parameter-capable query executor. +// If params is empty, this falls back to plain ExecuteSRQLQuery. 
+func (m *MCPServer) executeSRQLQueryWithParams(ctx context.Context, query string, params []any, limit int) ([]map[string]interface{}, error) { // Transform sweep_results queries to devices queries with sweep discovery source filter transformedQuery := m.transformSweepResultsQuery(query) - return m.queryExecutor.ExecuteSRQLQuery(ctx, transformedQuery, limit) + if len(params) == 0 { + return m.queryExecutor.ExecuteSRQLQuery(ctx, transformedQuery, limit) + } + + executor, ok := m.queryExecutor.(ParameterizedQueryExecutor) + if !ok { + return nil, fmt.Errorf("%w", errParameterizedSRQLNotSupported) + } + + return executor.ExecuteSRQLQueryWithParams(ctx, transformedQuery, params, limit) } // transformSweepResultsQuery transforms sweep_results queries to equivalent devices queries diff --git a/pkg/mcp/server_test.go b/pkg/mcp/server_test.go index 4ae8f7e7b..103c0f019 100644 --- a/pkg/mcp/server_test.go +++ b/pkg/mcp/server_test.go @@ -17,13 +17,13 @@ package mcp import ( - "context" - "testing" + "context" + "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/assert" - "github.com/carverauto/serviceradar/pkg/core/auth" - "github.com/carverauto/serviceradar/pkg/logger" + "github.com/carverauto/serviceradar/pkg/core/auth" + "github.com/carverauto/serviceradar/pkg/logger" ) // mockQueryExecutor implements local QueryExecutor for testing diff --git a/pkg/mcp/tools_devices.go b/pkg/mcp/tools_devices.go index 3ff71cf2a..029efd5db 100644 --- a/pkg/mcp/tools_devices.go +++ b/pkg/mcp/tools_devices.go @@ -80,14 +80,14 @@ func (m *MCPServer) registerDeviceTools() { return nil, errDeviceIDRequired } - // Build SRQL query for specific device - filter := fmt.Sprintf("device_id = '%s'", deviceIDArgs.DeviceID) + // Build SRQL query for specific device (parameterized) + filter := "device_id = $1" query := BuildSRQL("devices", filter, "", 1, false) m.logger.Debug().Str("device_id", deviceIDArgs.DeviceID).Str("query", query).Msg("Retrieving device") // Execute SRQL query via API - results, err := m.executeSRQLQuery(ctx, query, 1) + results, err := m.executeSRQLQueryWithParams(ctx, query, []any{deviceIDArgs.DeviceID}, 1) if err != nil { return nil, fmt.Errorf("failed to execute device query: %w", err) } diff --git a/pkg/mcp/tools_events.go b/pkg/mcp/tools_events.go index 24610b7c2..1ce5ea5e7 100644 --- a/pkg/mcp/tools_events.go +++ b/pkg/mcp/tools_events.go @@ -95,10 +95,12 @@ func (m *MCPServer) registerGetAlertsTool() { // Build filters for high-severity events var filters []string + var queryParams []any filters = append(filters, "severity IN ('critical', 'high', 'alert')") if alertArgs.PollerID != "" { - filters = append(filters, fmt.Sprintf("poller_id = '%s'", alertArgs.PollerID)) + filters = append(filters, "poller_id = $1") + queryParams = append(queryParams, alertArgs.PollerID) } if alertArgs.StartTime != nil { @@ -116,7 +118,7 @@ func (m *MCPServer) registerGetAlertsTool() { m.logger.Debug().Str("query", query).Msg("Executing alerts query") // Execute SRQL query via API - results, err := m.executeSRQLQuery(ctx, query, limit) + results, err := m.executeSRQLQueryWithParams(ctx, query, queryParams, limit) if err != nil { return nil, fmt.Errorf("failed to execute alerts query: %w", err) } diff --git a/pkg/mcp/tools_logs.go b/pkg/mcp/tools_logs.go index 923651948..5a81a2681 100644 --- a/pkg/mcp/tools_logs.go +++ b/pkg/mcp/tools_logs.go @@ -101,8 +101,10 @@ func (m *MCPServer) registerLogTools() { } var filter string + var queryParams []any if recentArgs.PollerID != "" { - filter = 
fmt.Sprintf("poller_id = '%s'", recentArgs.PollerID) + filter = "poller_id = $1" + queryParams = []any{recentArgs.PollerID} } // Build SRQL query for recent logs @@ -115,7 +117,7 @@ func (m *MCPServer) registerLogTools() { Msg("Executing recent logs query") // Execute SRQL query via API - results, err := m.executeSRQLQuery(ctx, query, limit) + results, err := m.executeSRQLQueryWithParams(ctx, query, queryParams, limit) if err != nil { return nil, fmt.Errorf("failed to execute recent logs query: %w", err) } diff --git a/pkg/mcp/tools_sweeps.go b/pkg/mcp/tools_sweeps.go index 48f049cd6..bd8860d9b 100644 --- a/pkg/mcp/tools_sweeps.go +++ b/pkg/mcp/tools_sweeps.go @@ -91,11 +91,13 @@ func (m *MCPServer) registerGetRecentSweepsTool() { timeFilter := BuildTimeRangeFilter(&startTime, nil, "timestamp") var filters []string + var queryParams []any if timeFilter != "" { filters = append(filters, timeFilter) } if recentArgs.PollerID != "" { - filters = append(filters, fmt.Sprintf("poller_id = '%s'", recentArgs.PollerID)) + filters = append(filters, "poller_id = $1") + queryParams = append(queryParams, recentArgs.PollerID) } combinedFilter := CombineFilters(filters...) @@ -111,7 +113,7 @@ func (m *MCPServer) registerGetRecentSweepsTool() { Msg("Executing recent sweeps query") // Execute SRQL query via API - results, err := m.executeSRQLQuery(ctx, query, limit) + results, err := m.executeSRQLQueryWithParams(ctx, query, queryParams, limit) if err != nil { return nil, fmt.Errorf("failed to execute recent sweeps query: %w", err) } @@ -148,11 +150,13 @@ func (m *MCPServer) registerGetSweepSummaryTool() { timeFilter := BuildTimeRangeFilter(summaryArgs.StartTime, summaryArgs.EndTime, "timestamp") var filters []string + var queryParams []any if timeFilter != "" { filters = append(filters, timeFilter) } if summaryArgs.PollerID != "" { - filters = append(filters, fmt.Sprintf("poller_id = '%s'", summaryArgs.PollerID)) + filters = append(filters, "poller_id = $1") + queryParams = append(queryParams, summaryArgs.PollerID) } combinedFilter := CombineFilters(filters...) @@ -167,7 +171,7 @@ func (m *MCPServer) registerGetSweepSummaryTool() { Msg("Executing sweep summary query") // Execute SRQL query via API - results, err := m.executeSRQLQuery(ctx, query, 0) + results, err := m.executeSRQLQueryWithParams(ctx, query, queryParams, 0) if err != nil { return nil, fmt.Errorf("failed to execute sweep summary query: %w", err) } diff --git a/pkg/models/otel.go b/pkg/models/otel.go index 08ee2ddde..13483c05d 100644 --- a/pkg/models/otel.go +++ b/pkg/models/otel.go @@ -39,6 +39,7 @@ type OTELMetricRow struct { IsSlow bool Component string Level string + Unit string // Unit of measurement (e.g., "ms", "s", "bytes", "1" for counts) } // OTELTraceRow stores a single OTEL trace span row. 
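Stepping back from the individual tool changes above, a minimal sketch (not part of the patch; `sketchBuildEventFilter` is an invented name) of the bind-builder pattern the MCP tools now share: every caller-supplied value goes through `srqlBindBuilder.Bind`, which returns the next `$n` placeholder and records the value, and the accumulated `params` slice is what `executeSRQLQueryWithParams` hands to a `ParameterizedQueryExecutor`:

```go
package mcp

import "fmt"

// sketchBuildEventFilter is illustrative only; it assumes it lives in package
// mcp so it can use the unexported srqlBindBuilder from binds.go.
func sketchBuildEventFilter(pollerID, severity string) (string, []any) {
	binds := &srqlBindBuilder{}

	// Bind returns "$1", "$2", ... and appends the raw value to binds.params,
	// so caller input is never spliced into the SRQL text itself.
	query := fmt.Sprintf(
		"SHOW events WHERE poller_id = %s AND severity = %s ORDER BY _tp_time DESC",
		binds.Bind(pollerID),
		binds.Bind(severity),
	)

	return query, binds.params
}
```

When the resulting params slice is non-empty, `executeSRQL` and `executeSRQLQueryWithParams` refuse to fall back to plain `ExecuteSRQLQuery` and require the executor to implement `ParameterizedQueryExecutor`, which is what `TestStructuredToolsRequireParameterizedExecutor` above asserts.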
diff --git a/pkg/models/sweep.go b/pkg/models/sweep.go index 0f515cee9..e787ef84e 100644 --- a/pkg/models/sweep.go +++ b/pkg/models/sweep.go @@ -88,7 +88,7 @@ type Config struct { type SweepMode string const ( - ModeTCP SweepMode = "tcp" // SYN scanning (fast but breaks conntrack) + ModeTCP SweepMode = "tcp" // SYN scanning (fast but breaks conntrack) ModeTCPConnect SweepMode = "tcp_connect" // TCP connect scanning (safe for conntrack) ModeICMP SweepMode = "icmp" ) @@ -160,6 +160,89 @@ type PortResult struct { RespTime time.Duration `json:"response_time"` Service string `json:"service,omitempty"` // Optional service identification } + +// DeepCopyHostResult returns a snapshot copy of src that does not alias any of the +// pointer/slice/map fields of the source HostResult. +func DeepCopyHostResult(src *HostResult) HostResult { + if src == nil { + return HostResult{} + } + + dst := HostResult{ + Host: src.Host, + Available: src.Available, + FirstSeen: src.FirstSeen, + LastSeen: src.LastSeen, + ResponseTime: src.ResponseTime, + } + + copiedPortResults := make(map[*PortResult]*PortResult) + copiedByPort := make(map[int]*PortResult) + + if src.PortResults != nil { + dst.PortResults = make([]*PortResult, 0, len(src.PortResults)) + for _, pr := range src.PortResults { + if pr == nil { + dst.PortResults = append(dst.PortResults, nil) + continue + } + + prCopy := &PortResult{ + Port: pr.Port, + Available: pr.Available, + RespTime: pr.RespTime, + Service: pr.Service, + } + + copiedPortResults[pr] = prCopy + copiedByPort[pr.Port] = prCopy + dst.PortResults = append(dst.PortResults, prCopy) + } + } + + if src.PortMap != nil { + dst.PortMap = make(map[int]*PortResult, len(src.PortMap)) + for port, pr := range src.PortMap { + if pr == nil { + dst.PortMap[port] = nil + continue + } + + if prCopy, ok := copiedPortResults[pr]; ok { + dst.PortMap[port] = prCopy + continue + } + + if prCopy, ok := copiedByPort[port]; ok { + dst.PortMap[port] = prCopy + continue + } + + prCopy := &PortResult{ + Port: pr.Port, + Available: pr.Available, + RespTime: pr.RespTime, + Service: pr.Service, + } + + copiedPortResults[pr] = prCopy + copiedByPort[port] = prCopy + dst.PortMap[port] = prCopy + dst.PortResults = append(dst.PortResults, prCopy) + } + } + + if src.ICMPStatus != nil { + dst.ICMPStatus = &ICMPStatus{ + Available: src.ICMPStatus.Available, + RoundTrip: src.ICMPStatus.RoundTrip, + PacketLoss: src.ICMPStatus.PacketLoss, + } + } + + return dst +} + type PortCount struct { Port int `json:"port"` Available int `json:"available"` diff --git a/pkg/models/sweep_deepcopy_test.go b/pkg/models/sweep_deepcopy_test.go new file mode 100644 index 000000000..3bac325cf --- /dev/null +++ b/pkg/models/sweep_deepcopy_test.go @@ -0,0 +1,78 @@ +package models + +import ( + "testing" + "time" +) + +func TestDeepCopyHostResult_NoAliasing(t *testing.T) { + icmp := &ICMPStatus{Available: true, RoundTrip: 10 * time.Millisecond, PacketLoss: 0} + pr80 := &PortResult{Port: 80, Available: true, RespTime: 5 * time.Millisecond, Service: "http"} + pr443 := &PortResult{Port: 443, Available: false, RespTime: 0, Service: "https"} + + src := &HostResult{ + Host: "192.168.1.1", + Available: true, + FirstSeen: time.Unix(100, 0), + LastSeen: time.Unix(200, 0), + PortResults: []*PortResult{pr80, pr443}, + PortMap: map[int]*PortResult{80: pr80, 443: pr443}, + ICMPStatus: icmp, + ResponseTime: 11 * time.Millisecond, + } + + dst := DeepCopyHostResult(src) + + if dst.Host != src.Host || dst.Available != src.Available || !dst.FirstSeen.Equal(src.FirstSeen) 
|| !dst.LastSeen.Equal(src.LastSeen) || dst.ResponseTime != src.ResponseTime { + t.Fatalf("expected scalar fields to match") + } + + if dst.ICMPStatus == nil || src.ICMPStatus == nil { + t.Fatalf("expected ICMPStatus to be non-nil") + } + if dst.ICMPStatus == src.ICMPStatus { + t.Fatalf("expected ICMPStatus to be deep-copied") + } + if *dst.ICMPStatus != *src.ICMPStatus { + t.Fatalf("expected ICMPStatus values to match") + } + + if dst.PortResults == nil || len(dst.PortResults) != len(src.PortResults) { + t.Fatalf("expected PortResults to be copied") + } + if dst.PortMap == nil || len(dst.PortMap) != len(src.PortMap) { + t.Fatalf("expected PortMap to be copied") + } + if &dst.PortResults[0] == &src.PortResults[0] { + t.Fatalf("unexpected slice aliasing") + } + + for i := range src.PortResults { + if src.PortResults[i] == nil || dst.PortResults[i] == nil { + t.Fatalf("unexpected nil PortResult at index %d", i) + } + if src.PortResults[i] == dst.PortResults[i] { + t.Fatalf("expected PortResults[%d] to be deep-copied", i) + } + if *src.PortResults[i] != *dst.PortResults[i] { + t.Fatalf("expected PortResults[%d] values to match", i) + } + if got := dst.PortMap[dst.PortResults[i].Port]; got != dst.PortResults[i] { + t.Fatalf("expected PortMap to reference the same copied PortResult for port %d", dst.PortResults[i].Port) + } + } + + dst.PortResults[0].Available = false + dst.PortMap[80].Service = "changed" + dst.ICMPStatus.PacketLoss = 50 + + if src.PortResults[0].Available == dst.PortResults[0].Available { + t.Fatalf("expected PortResults mutation not to affect source") + } + if src.PortMap[80].Service == dst.PortMap[80].Service { + t.Fatalf("expected PortMap mutation not to affect source") + } + if src.ICMPStatus.PacketLoss == dst.ICMPStatus.PacketLoss { + t.Fatalf("expected ICMPStatus mutation not to affect source") + } +} diff --git a/pkg/registry/canon_simulation_test.go b/pkg/registry/canon_simulation_test.go index 3de3f21a3..be4a938d2 100644 --- a/pkg/registry/canon_simulation_test.go +++ b/pkg/registry/canon_simulation_test.go @@ -300,11 +300,11 @@ func TestDIREIdentityResolution(t *testing.T) { func setupDIREMockDB(mockDB *db.MockService, identifierStore map[string]string) { // Mock BatchGetDeviceIDsByIdentifier to check if identifier already exists mockDB.EXPECT(). - BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, identifierType string, identifierValues []string) (map[string]string, error) { + BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, identifierType string, identifierValues []string, partition string) (map[string]string, error) { result := make(map[string]string) for _, val := range identifierValues { - key := identifierType + ":" + val + key := strongIdentifierCacheKey(partition, identifierType, val) if deviceID, ok := identifierStore[key]; ok { result[val] = deviceID } @@ -318,7 +318,7 @@ func setupDIREMockDB(mockDB *db.MockService, identifierStore map[string]string) UpsertDeviceIdentifiers(gomock.Any(), gomock.Any()). 
DoAndReturn(func(_ context.Context, identifiers []*models.DeviceIdentifier) error { for _, id := range identifiers { - key := id.IDType + ":" + id.IDValue + key := strongIdentifierCacheKey(id.Partition, id.IDType, id.IDValue) identifierStore[key] = id.DeviceID } return nil diff --git a/pkg/registry/device_store.go b/pkg/registry/device_store.go index 69904afd3..e129ceb4f 100644 --- a/pkg/registry/device_store.go +++ b/pkg/registry/device_store.go @@ -195,13 +195,13 @@ func cloneDeviceRecord(src *DeviceRecord) *DeviceRecord { dst.CollectorAgentID = &collectorAgentID } - if len(src.DiscoverySources) > 0 { - dst.DiscoverySources = append([]string(nil), src.DiscoverySources...) + if src.DiscoverySources != nil { + dst.DiscoverySources = append(make([]string, 0, len(src.DiscoverySources)), src.DiscoverySources...) } - if len(src.Capabilities) > 0 { - dst.Capabilities = append([]string(nil), src.Capabilities...) + if src.Capabilities != nil { + dst.Capabilities = append(make([]string, 0, len(src.Capabilities)), src.Capabilities...) } - if len(src.Metadata) > 0 { + if src.Metadata != nil { meta := make(map[string]string, len(src.Metadata)) for k, v := range src.Metadata { meta[k] = v diff --git a/pkg/registry/device_store_test.go b/pkg/registry/device_store_test.go index 3959948eb..1cdbe8b7d 100644 --- a/pkg/registry/device_store_test.go +++ b/pkg/registry/device_store_test.go @@ -78,6 +78,63 @@ func TestUpsertAndGetDeviceRecord(t *testing.T) { } } +func TestCloneDeviceRecord_EmptyMetadataMapDoesNotAlias(t *testing.T) { + original := &DeviceRecord{ + DeviceID: testDeviceID1, + IP: "10.0.0.1", + Metadata: map[string]string{}, + } + + clone1 := cloneDeviceRecord(original) + if clone1 == nil { + t.Fatalf("expected clone to be non-nil") + } + + clone1.Metadata["k"] = "v1" + + if len(original.Metadata) != 0 { + t.Fatalf("expected original metadata to remain empty, got %#v", original.Metadata) + } + + clone2 := cloneDeviceRecord(original) + if clone2 == nil { + t.Fatalf("expected second clone to be non-nil") + } + if _, ok := clone2.Metadata["k"]; ok { + t.Fatalf("expected second clone metadata to not include prior clone mutation, got %#v", clone2.Metadata) + } +} + +func TestCloneDeviceRecord_EmptySlicesWithCapacityDoNotAlias(t *testing.T) { + original := &DeviceRecord{ + DeviceID: testDeviceID1, + IP: "10.0.0.1", + DiscoverySources: make([]string, 0, 1), + Capabilities: make([]string, 0, 1), + } + + clone1 := cloneDeviceRecord(original) + if clone1 == nil { + t.Fatalf("expected clone to be non-nil") + } + clone1.DiscoverySources = append(clone1.DiscoverySources, "a") + clone1.Capabilities = append(clone1.Capabilities, "icmp") + + clone2 := cloneDeviceRecord(original) + if clone2 == nil { + t.Fatalf("expected second clone to be non-nil") + } + clone2.DiscoverySources = append(clone2.DiscoverySources, "b") + clone2.Capabilities = append(clone2.Capabilities, "snmp") + + if got := clone1.DiscoverySources[0]; got != "a" { + t.Fatalf("expected clone1 discovery source to remain %q, got %q", "a", got) + } + if got := clone1.Capabilities[0]; got != "icmp" { + t.Fatalf("expected clone1 capability to remain %q, got %q", "icmp", got) + } +} + func TestUpsertUpdatesIndexes(t *testing.T) { reg := newTestDeviceRegistry() diff --git a/pkg/registry/identity_engine.go b/pkg/registry/identity_engine.go index 56805b521..9c71e9c10 100644 --- a/pkg/registry/identity_engine.go +++ b/pkg/registry/identity_engine.go @@ -86,7 +86,7 @@ type identityEngineCache struct { maxSize int // Maps strong identifiers to device IDs - // 
Key format: "<type>:<value>" e.g., "armis_device_id:12345" + // Key format: "<partition>:<type>:<value>" e.g., "default:armis_device_id:12345" strongIDToDeviceID map[string]engineCacheEntry // Maps IP addresses to device IDs (weak identifier, lower priority) @@ -184,21 +184,29 @@ func (e *IdentityEngine) ExtractStrongIdentifiers(update *models.DeviceUpdate) * // Build cache keys for strong identifiers if ids.ArmisID != "" { - ids.CacheKeys = append(ids.CacheKeys, IdentifierTypeArmis+":"+ids.ArmisID) + ids.CacheKeys = append(ids.CacheKeys, strongIdentifierCacheKey(ids.Partition, IdentifierTypeArmis, ids.ArmisID)) } if ids.IntegrationID != "" { - ids.CacheKeys = append(ids.CacheKeys, IdentifierTypeIntegration+":"+ids.IntegrationID) + ids.CacheKeys = append(ids.CacheKeys, strongIdentifierCacheKey(ids.Partition, IdentifierTypeIntegration, ids.IntegrationID)) } if ids.NetboxID != "" { - ids.CacheKeys = append(ids.CacheKeys, IdentifierTypeNetbox+":"+ids.NetboxID) + ids.CacheKeys = append(ids.CacheKeys, strongIdentifierCacheKey(ids.Partition, IdentifierTypeNetbox, ids.NetboxID)) } if ids.MAC != "" { - ids.CacheKeys = append(ids.CacheKeys, IdentifierTypeMAC+":"+ids.MAC) + ids.CacheKeys = append(ids.CacheKeys, strongIdentifierCacheKey(ids.Partition, IdentifierTypeMAC, ids.MAC)) } return ids } +func strongIdentifierCacheKey(partition, idType, idValue string) string { + partition = strings.TrimSpace(partition) + if partition == "" { + partition = defaultPartition + } + return partition + ":" + idType + ":" + strings.TrimSpace(idValue) +} + // ResolveDeviceID resolves a device update to a canonical ServiceRadar device ID. // // If an existing device matches the update's strong identifiers, it returns that device's ID. @@ -489,84 +497,146 @@ func (e *IdentityEngine) batchLookupByStrongIdentifiers( return matches } - // Collect all identifiers by type - armisIDs := make([]string, 0) - integrationIDs := make([]string, 0) - netboxIDs := make([]string, 0) - macs := make([]string, 0) + updatesByPartition := groupUpdatesByPartition(updates, updateIdentifiers) + for partition, partitionUpdates := range updatesByPartition { + partitionMatches := e.batchLookupByStrongIdentifiersForPartition(ctx, partition, partitionUpdates, updateIdentifiers) + for update, deviceID := range partitionMatches { + matches[update] = deviceID + } + } + + return matches +} +func groupUpdatesByPartition(updates []*models.DeviceUpdate, updateIdentifiers map[*models.DeviceUpdate]*StrongIdentifiers) map[string][]*models.DeviceUpdate { + updatesByPartition := make(map[string][]*models.DeviceUpdate) for _, update := range updates { ids := updateIdentifiers[update] if ids == nil { continue } - if ids.ArmisID != "" { - armisIDs = append(armisIDs, ids.ArmisID) - } - if ids.IntegrationID != "" { - integrationIDs = append(integrationIDs, ids.IntegrationID) - } - if ids.NetboxID != "" { - netboxIDs = append(netboxIDs, ids.NetboxID) - } - if ids.MAC != "" { - macs = append(macs, ids.MAC) + + partition := strings.TrimSpace(ids.Partition) + if partition == "" { + partition = defaultPartition } + + updatesByPartition[partition] = append(updatesByPartition[partition], update) + } + return updatesByPartition +} + +func (e *IdentityEngine) batchLookupByStrongIdentifiersForPartition( + ctx context.Context, + partition string, + updates []*models.DeviceUpdate, + updateIdentifiers map[*models.DeviceUpdate]*StrongIdentifiers, +) map[*models.DeviceUpdate]string { + matches := make(map[*models.DeviceUpdate]string) + if e == nil || e.db == nil || len(updates) 
== 0 { + return matches } - // Batch query each identifier type + identifierSets := collectStrongIdentifierSets(updates, updateIdentifiers) identifierToDevice := make(map[string]string) - if len(armisIDs) > 0 { - results, err := e.db.BatchGetDeviceIDsByIdentifier(ctx, IdentifierTypeArmis, armisIDs) - if err == nil { - for idValue, deviceID := range results { - identifierToDevice[IdentifierTypeArmis+":"+idValue] = deviceID - } + for _, entry := range []struct { + idType string + values map[string]struct{} + }{ + {IdentifierTypeArmis, identifierSets.armisIDs}, + {IdentifierTypeIntegration, identifierSets.integrationIDs}, + {IdentifierTypeNetbox, identifierSets.netboxIDs}, + {IdentifierTypeMAC, identifierSets.macs}, + } { + for key, deviceID := range e.batchLookupIdentifierType(ctx, entry.idType, entry.values, partition) { + identifierToDevice[key] = deviceID } } - if len(integrationIDs) > 0 { - results, err := e.db.BatchGetDeviceIDsByIdentifier(ctx, IdentifierTypeIntegration, integrationIDs) - if err == nil { - for idValue, deviceID := range results { - identifierToDevice[IdentifierTypeIntegration+":"+idValue] = deviceID - } + for _, update := range updates { + ids := updateIdentifiers[update] + if ids == nil { + continue } - } - if len(netboxIDs) > 0 { - results, err := e.db.BatchGetDeviceIDsByIdentifier(ctx, IdentifierTypeNetbox, netboxIDs) - if err == nil { - for idValue, deviceID := range results { - identifierToDevice[IdentifierTypeNetbox+":"+idValue] = deviceID + for _, key := range ids.CacheKeys { + if deviceID := identifierToDevice[key]; deviceID != "" { + matches[update] = deviceID + break } } } - if len(macs) > 0 { - results, err := e.db.BatchGetDeviceIDsByIdentifier(ctx, IdentifierTypeMAC, macs) - if err == nil { - for idValue, deviceID := range results { - identifierToDevice[IdentifierTypeMAC+":"+idValue] = deviceID - } - } + return matches +} + +type strongIdentifierSets struct { + armisIDs map[string]struct{} + integrationIDs map[string]struct{} + netboxIDs map[string]struct{} + macs map[string]struct{} +} + +func collectStrongIdentifierSets( + updates []*models.DeviceUpdate, + updateIdentifiers map[*models.DeviceUpdate]*StrongIdentifiers, +) strongIdentifierSets { + sets := strongIdentifierSets{ + armisIDs: make(map[string]struct{}), + integrationIDs: make(map[string]struct{}), + netboxIDs: make(map[string]struct{}), + macs: make(map[string]struct{}), } - // Match updates to device IDs (respecting priority order) for _, update := range updates { ids := updateIdentifiers[update] if ids == nil { continue } + if ids.ArmisID != "" { + sets.armisIDs[ids.ArmisID] = struct{}{} + } + if ids.IntegrationID != "" { + sets.integrationIDs[ids.IntegrationID] = struct{}{} + } + if ids.NetboxID != "" { + sets.netboxIDs[ids.NetboxID] = struct{}{} + } + if ids.MAC != "" { + sets.macs[ids.MAC] = struct{}{} + } + } - // Check in priority order - for _, key := range ids.CacheKeys { - if deviceID := identifierToDevice[key]; deviceID != "" { - matches[update] = deviceID - break - } + return sets +} + +func (e *IdentityEngine) batchLookupIdentifierType( + ctx context.Context, + identifierType string, + identifierValues map[string]struct{}, + partition string, +) map[string]string { + matches := make(map[string]string) + if e == nil || e.db == nil || identifierType == "" || len(identifierValues) == 0 { + return matches + } + + values := make([]string, 0, len(identifierValues)) + for v := range identifierValues { + values = append(values, v) + } + + results, err := 
e.db.BatchGetDeviceIDsByIdentifier(ctx, identifierType, values, partition) + if err != nil { + return matches + } + + for idValue, deviceID := range results { + if deviceID == "" { + continue } + matches[strongIdentifierCacheKey(partition, identifierType, idValue)] = deviceID } return matches diff --git a/pkg/registry/identity_engine_partition_test.go b/pkg/registry/identity_engine_partition_test.go new file mode 100644 index 000000000..6067c5fe6 --- /dev/null +++ b/pkg/registry/identity_engine_partition_test.go @@ -0,0 +1,51 @@ +package registry + +import ( + "context" + "testing" + + "github.com/carverauto/serviceradar/pkg/db" + "github.com/carverauto/serviceradar/pkg/logger" + "github.com/carverauto/serviceradar/pkg/models" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestBatchLookupByStrongIdentifiers_PartitionScoped(t *testing.T) { + ctrl := gomock.NewController(t) + mockDB := db.NewMockService(ctrl) + engine := NewIdentityEngine(mockDB, logger.NewTestLogger()) + + mac := "AA:BB:CC:DD:EE:FF" + normalizedMAC := NormalizeMAC(mac) + + updateA := &models.DeviceUpdate{Partition: "tenant-a", MAC: stringPtr(mac)} + updateB := &models.DeviceUpdate{Partition: "tenant-b", MAC: stringPtr(mac)} + + updateIdentifiers := map[*models.DeviceUpdate]*StrongIdentifiers{ + updateA: engine.ExtractStrongIdentifiers(updateA), + updateB: engine.ExtractStrongIdentifiers(updateB), + } + + mockDB.EXPECT(). + BatchGetDeviceIDsByIdentifier(gomock.Any(), IdentifierTypeMAC, gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, identifierType string, identifierValues []string, partition string) (map[string]string, error) { + require.Equal(t, IdentifierTypeMAC, identifierType) + require.ElementsMatch(t, []string{normalizedMAC}, identifierValues) + + switch partition { + case "tenant-a": + return map[string]string{normalizedMAC: "sr:tenant-a-device-123"}, nil + case "tenant-b": + return map[string]string{normalizedMAC: "sr:tenant-b-device-456"}, nil + default: + return map[string]string{}, nil + } + }). 
+ Times(2) + + matches := engine.batchLookupByStrongIdentifiers(context.Background(), []*models.DeviceUpdate{updateA, updateB}, updateIdentifiers) + + require.Equal(t, "sr:tenant-a-device-123", matches[updateA]) + require.Equal(t, "sr:tenant-b-device-456", matches[updateB]) +} diff --git a/pkg/registry/identity_publisher.go b/pkg/registry/identity_publisher.go deleted file mode 100644 index f77b47616..000000000 --- a/pkg/registry/identity_publisher.go +++ /dev/null @@ -1,570 +0,0 @@ -package registry - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/cenkalti/backoff/v5" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/carverauto/serviceradar/pkg/identitymap" - "github.com/carverauto/serviceradar/pkg/logger" - "github.com/carverauto/serviceradar/pkg/models" - "github.com/carverauto/serviceradar/proto" - "google.golang.org/grpc" -) - -type kvIdentityClient interface { - Get(ctx context.Context, in *proto.GetRequest, opts ...grpc.CallOption) (*proto.GetResponse, error) - PutIfAbsent(ctx context.Context, in *proto.PutRequest, opts ...grpc.CallOption) (*proto.PutResponse, error) - Update(ctx context.Context, in *proto.UpdateRequest, opts ...grpc.CallOption) (*proto.UpdateResponse, error) - Delete(ctx context.Context, in *proto.DeleteRequest, opts ...grpc.CallOption) (*proto.DeleteResponse, error) -} - -type identityPublisher struct { - kvClient kvIdentityClient - namespace string - ttlSeconds int64 - metrics *identityPublisherMetrics - logger logger.Logger - cache *identityCache -} - -const ( - identityInitialBackoff = 50 * time.Millisecond - identityMaxBackoff = 750 * time.Millisecond - identityMaxElapsed = 5 * time.Second - identityCacheTTL = 5 * time.Minute -) - -// WithIdentityPublisher wires a KV-backed identity map publisher into the device registry. 
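The partition-scoped cache key introduced above is easiest to see in isolation. Below is a minimal, standalone sketch of the key construction; the identifier-type string and the `default` fallback are taken from the `"default:armis_device_id:12345"` example in the updated comment, while the package layout and helper name are illustrative only, not the repo's actual code.

```go
package main

import (
	"fmt"
	"strings"
)

// cacheKey mirrors strongIdentifierCacheKey from the hunk above: blank or
// whitespace-only partitions collapse to the default partition, and the value
// is trimmed before being joined as "<partition>:<type>:<value>".
func cacheKey(partition, idType, idValue string) string {
	partition = strings.TrimSpace(partition)
	if partition == "" {
		partition = "default" // assumed value of defaultPartition, per the key example in the comment
	}
	return partition + ":" + idType + ":" + strings.TrimSpace(idValue)
}

func main() {
	fmt.Println(cacheKey("tenant-a", "armis_device_id", "12345")) // tenant-a:armis_device_id:12345
	fmt.Println(cacheKey("  ", "armis_device_id", " 12345 "))     // default:armis_device_id:12345
}
```

Because the partition is baked into every cache key (and into the per-partition DB lookups), the same MAC or Armis ID seen in two tenants can no longer collide on one canonical device, which is exactly what the `TestBatchLookupByStrongIdentifiers_PartitionScoped` test above asserts.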
-func WithIdentityPublisher(client kvIdentityClient, namespace string, ttl time.Duration) Option { - return func(r *DeviceRegistry) { - if r == nil { - return - } - r.identityPublisher = newIdentityPublisher(client, namespace, ttl, r.logger) - } -} - -type identityPublisherMetrics struct { - publishBatches atomic.Int64 - publishedKeys atomic.Int64 - deletedKeys atomic.Int64 - failures atomic.Int64 -} - -func newIdentityPublisherMetrics() *identityPublisherMetrics { - return &identityPublisherMetrics{} -} - -func (m *identityPublisherMetrics) recordPublish(keyCount int) { - m.publishBatches.Add(1) - m.publishedKeys.Add(int64(keyCount)) -} - -func (m *identityPublisherMetrics) recordDelete(keyCount int) { - if keyCount <= 0 { - return - } - m.deletedKeys.Add(int64(keyCount)) -} - -func (m *identityPublisherMetrics) recordFailure() { - m.failures.Add(1) -} - -type identityCache struct { - mu sync.RWMutex - ttl time.Duration - entries map[string]identityCacheEntry -} - -type identityCacheEntry struct { - metadataHash string - attributesHash string - revision uint64 - expiresAt time.Time -} - -func newIdentityCache(ttl time.Duration) *identityCache { - if ttl < 0 { - ttl = 0 - } - return &identityCache{ - ttl: ttl, - entries: make(map[string]identityCacheEntry), - } -} - -func (c *identityCache) get(key string) *identityCacheEntry { - if c == nil { - return nil - } - - c.mu.RLock() - entry, ok := c.entries[key] - c.mu.RUnlock() - if !ok { - return nil - } - - if !entry.expiresAt.IsZero() && time.Now().After(entry.expiresAt) { - c.mu.Lock() - if current, ok := c.entries[key]; ok && current.expiresAt.Equal(entry.expiresAt) { - delete(c.entries, key) - } - c.mu.Unlock() - return nil - } - - e := entry - return &e -} - -func (c *identityCache) set(key, metadataHash, attrsHash string, revision uint64) { - if c == nil { - return - } - - var expiresAt time.Time - if c.ttl > 0 { - expiresAt = time.Now().Add(c.ttl) - } - - c.mu.Lock() - c.entries[key] = identityCacheEntry{ - metadataHash: metadataHash, - attributesHash: attrsHash, - revision: revision, - expiresAt: expiresAt, - } - c.mu.Unlock() -} - -func (c *identityCache) delete(key string) { - if c == nil { - return - } - - c.mu.Lock() - delete(c.entries, key) - c.mu.Unlock() -} - -func newIdentityPublisher(client kvIdentityClient, namespace string, ttl time.Duration, log logger.Logger) *identityPublisher { - if client == nil { - return nil - } - ns := strings.TrimSpace(namespace) - if ns == "" { - ns = identitymap.DefaultNamespace - } - return &identityPublisher{ - kvClient: client, - namespace: ns, - ttlSeconds: int64(ttl / time.Second), - metrics: newIdentityPublisherMetrics(), - logger: log, - cache: newIdentityCache(identityCacheTTL), - } -} - -func (p *identityPublisher) Publish(ctx context.Context, updates []*models.DeviceUpdate) error { - if p == nil || p.kvClient == nil || len(updates) == 0 { - return nil - } - - now := time.Now().UTC() - var publishErr error - - for _, update := range updates { - if update == nil || shouldSkipIdentityPublish(update) { - continue - } - - record := &identitymap.Record{ - CanonicalDeviceID: update.DeviceID, - Partition: update.Partition, - MetadataHash: identitymap.HashIdentityMetadata(update), - UpdatedAt: now, - Attributes: buildIdentityAttributes(update), - } - - payload, err := identitymap.MarshalRecord(record) - if err != nil { - publishErr = errors.Join(publishErr, fmt.Errorf("marshal canonical record: %w", err)) - continue - } - - snapshot, snapErr := p.existingIdentitySnapshot(ctx, update.DeviceID) - 
if snapErr != nil { - publishErr = errors.Join(publishErr, snapErr) - } - if snapshot != nil && snapshot.canonicalKey != "" { - p.cache.set(snapshot.canonicalKey, snapshot.metadataHash, snapshot.attrsHash, snapshot.revision) - } - - newKeySet := make(map[string]struct{}) - - for _, key := range identitymap.BuildKeys(update) { - keyPath := key.KeyPath(p.namespace) - newKeySet[keyPath] = struct{}{} - if err := p.upsertIdentity(ctx, keyPath, payload, record.MetadataHash, record.Attributes); err != nil { - publishErr = errors.Join(publishErr, err) - } - } - - if snapshot != nil { - if stale := snapshot.staleKeys(newKeySet); len(stale) > 0 { - if err := p.deleteIdentityKeys(ctx, stale); err != nil { - publishErr = errors.Join(publishErr, err) - } - } - } - } - - if publishErr != nil { - p.metrics.recordFailure() - } - - return publishErr -} - -func (p *identityPublisher) upsertIdentity(ctx context.Context, key string, payload []byte, metadataHash string, attrs map[string]string) error { - attrsHash := identitymap.HashMetadata(attrs) - - if cached := p.cache.get(key); cached != nil { - if cached.metadataHash == metadataHash && cached.attributesHash == attrsHash { - identitymap.RecordKVPublish(ctx, 1, "unchanged") - return nil - } - if cached.revision > 0 { - resp, err := p.kvClient.Update(ctx, &proto.UpdateRequest{ - Key: key, - Value: payload, - Revision: cached.revision, - TtlSeconds: p.ttlSeconds, - }) - if err == nil { - p.metrics.recordPublish(1) - identitymap.RecordKVPublish(ctx, 1, "updated") - newRevision := uint64(0) - if resp != nil { - newRevision = resp.GetRevision() - } - p.cache.set(key, metadataHash, attrsHash, newRevision) - p.logger.Debug().Str("key", key).Msg("Updated canonical identity entry in KV (cache fast-path)") - return nil - } - if shouldRetryKV(err) { - p.cache.delete(key) - code := status.Code(err) - if code == codes.AlreadyExists || code == codes.Aborted { - identitymap.RecordKVConflict(ctx, code.String()) - p.logger.Debug().Str("key", key).Str("reason", code.String()).Msg("KV identity update conflict on cache fast-path") - } - } else { - return err - } - } - } - - bo := backoff.NewExponentialBackOff() - bo.InitialInterval = identityInitialBackoff - bo.MaxInterval = identityMaxBackoff - bo.Multiplier = 1.6 - bo.RandomizationFactor = 0.2 - - operation := func() (struct{}, error) { - resp, err := p.kvClient.Get(ctx, &proto.GetRequest{Key: key}) - if err != nil { - if shouldRetryKV(err) { - return struct{}{}, err - } - return struct{}{}, backoff.Permanent(err) - } - - if !resp.GetFound() { - _, err := p.kvClient.PutIfAbsent(ctx, &proto.PutRequest{Key: key, Value: payload, TtlSeconds: p.ttlSeconds}) - if err != nil { - if shouldRetryKV(err) { - code := status.Code(err) - if code == codes.AlreadyExists || code == codes.Aborted { - identitymap.RecordKVConflict(ctx, code.String()) - p.logger.Debug().Str("key", key).Str("reason", code.String()).Msg("KV identity publish encountered conflict") - } - return struct{}{}, err - } - return struct{}{}, backoff.Permanent(err) - } - - p.metrics.recordPublish(1) - identitymap.RecordKVPublish(ctx, 1, "created") - p.logger.Debug().Str("key", key).Msg("Created canonical identity entry in KV") - p.cache.set(key, metadataHash, attrsHash, 0) - return struct{}{}, nil - } - - existing, err := identitymap.UnmarshalRecord(resp.GetValue()) - if err != nil { - if errors.Is(err, identitymap.ErrCorruptRecord) { - p.logger.Warn().Str("key", key).Err(err).Msg("Replacing corrupt canonical identity entry in KV") - p.cache.delete(key) - } else { - return 
struct{}{}, backoff.Permanent(fmt.Errorf("unmarshal existing canonical record: %w", err)) - } - } else { - existingAttrsHash := identitymap.HashMetadata(existing.Attributes) - p.cache.set(key, existing.MetadataHash, existingAttrsHash, resp.GetRevision()) - if existing.MetadataHash == metadataHash && attributesEqual(existing.Attributes, attrs) { - identitymap.RecordKVPublish(ctx, 1, "unchanged") - return struct{}{}, nil - } - } - - updateResp, err := p.kvClient.Update(ctx, &proto.UpdateRequest{ - Key: key, - Value: payload, - Revision: resp.GetRevision(), - TtlSeconds: p.ttlSeconds, - }) - if err != nil { - if shouldRetryKV(err) { - code := status.Code(err) - if code == codes.AlreadyExists || code == codes.Aborted { - identitymap.RecordKVConflict(ctx, code.String()) - p.logger.Debug().Str("key", key).Str("reason", code.String()).Msg("KV identity update encountered conflict") - } - p.cache.delete(key) - return struct{}{}, err - } - return struct{}{}, backoff.Permanent(err) - } - - p.metrics.recordPublish(1) - identitymap.RecordKVPublish(ctx, 1, "updated") - p.logger.Debug().Str("key", key).Msg("Updated canonical identity entry in KV") - var newRevision uint64 - if updateResp != nil { - newRevision = updateResp.GetRevision() - } - p.cache.set(key, metadataHash, attrsHash, newRevision) - return struct{}{}, nil - } - - if _, err := backoff.Retry(ctx, operation, backoff.WithBackOff(bo), backoff.WithMaxElapsedTime(identityMaxElapsed)); err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return err - } - identitymap.RecordKVConflict(ctx, "retry_exhausted") - p.logger.Warn().Str("key", key).Err(err).Msg("KV identity publish exhausted retries") - return fmt.Errorf("publish identity key %s: %w", key, err) - } - - return nil -} - -func shouldRetryKV(err error) bool { - if err == nil { - return false - } - - //exhaustive:ignore - switch status.Code(err) { - case codes.OK: - return false - case codes.AlreadyExists, codes.Aborted, codes.Unavailable, codes.ResourceExhausted, codes.DeadlineExceeded, codes.Internal: - return true - default: - return false - } -} - -func shouldSkipIdentityPublish(update *models.DeviceUpdate) bool { - if update == nil { - return true - } - if update.DeviceID == "" { - return true - } - if update.Source == models.DiscoverySourceSweep { - return true - } - if update.Metadata != nil { - if deleted, ok := update.Metadata["_deleted"]; ok && strings.EqualFold(deleted, "true") { - return true - } - } - return false -} - -func buildIdentityAttributes(update *models.DeviceUpdate) map[string]string { - attrs := map[string]string{} - if update == nil { - return nil - } - - if update.IP != "" { - attrs["ip"] = update.IP - } - if update.Partition != "" { - attrs["partition"] = update.Partition - } - if update.Hostname != nil && strings.TrimSpace(*update.Hostname) != "" { - attrs["hostname"] = strings.TrimSpace(*update.Hostname) - } - if src := strings.TrimSpace(string(update.Source)); src != "" { - attrs["source"] = src - } - if update.Metadata != nil { - if armis := strings.TrimSpace(update.Metadata["armis_device_id"]); armis != "" { - attrs["armis_device_id"] = armis - } - if integration := strings.TrimSpace(update.Metadata["integration_id"]); integration != "" { - attrs["integration_id"] = integration - } - if netbox := strings.TrimSpace(update.Metadata["netbox_device_id"]); netbox != "" { - attrs["netbox_device_id"] = netbox - } - if typ := strings.TrimSpace(update.Metadata["integration_type"]); typ != "" { - attrs["integration_type"] = 
typ - } - } - if update.MAC != nil { - mac := strings.TrimSpace(*update.MAC) - if mac != "" { - attrs["mac"] = strings.ToUpper(mac) - } - } - if len(attrs) == 0 { - return nil - } - return attrs -} - -func attributesEqual(existing, desired map[string]string) bool { - if len(desired) == 0 { - return len(existing) == 0 - } - for key, val := range desired { - if strings.TrimSpace(val) == "" { - continue - } - if strings.TrimSpace(existing[key]) != strings.TrimSpace(val) { - return false - } - } - return true -} - -func (r *DeviceRegistry) publishIdentityMap(ctx context.Context, updates []*models.DeviceUpdate) { - if r.identityPublisher == nil { - return - } - if err := r.identityPublisher.Publish(ctx, updates); err != nil { - r.logger.Warn().Err(err).Msg("Failed to publish identity map updates") - } -} - -type identitySnapshot struct { - keys map[string]struct{} - canonicalKey string - metadataHash string - attrsHash string - revision uint64 -} - -func (s *identitySnapshot) staleKeys(newKeys map[string]struct{}) []string { - if s == nil || len(s.keys) == 0 { - return nil - } - - stale := make([]string, 0, len(s.keys)) - for key := range s.keys { - if _, ok := newKeys[key]; !ok { - stale = append(stale, key) - } - } - return stale -} - -func (p *identityPublisher) existingIdentitySnapshot(ctx context.Context, deviceID string) (*identitySnapshot, error) { - if p == nil || p.kvClient == nil || strings.TrimSpace(deviceID) == "" { - return nil, nil - } - - key := identitymap.Key{Kind: identitymap.KindDeviceID, Value: deviceID}.KeyPath(p.namespace) - resp, err := p.kvClient.Get(ctx, &proto.GetRequest{Key: key}) - if err != nil { - return nil, err - } - if !resp.GetFound() || len(resp.GetValue()) == 0 { - return nil, nil - } - - record, err := identitymap.UnmarshalRecord(resp.GetValue()) - if err != nil { - if errors.Is(err, identitymap.ErrCorruptRecord) { - p.logger.Warn().Str("key", key).Str("device_id", deviceID).Err(err).Msg("Ignoring corrupt canonical identity entry in KV") - return nil, nil - } - return nil, fmt.Errorf("unmarshal canonical record for device %s: %w", deviceID, err) - } - - keys := identitymap.BuildKeysFromRecord(record) - keySet := make(map[string]struct{}, len(keys)) - for _, identityKey := range keys { - keySet[identityKey.KeyPath(p.namespace)] = struct{}{} - } - - return &identitySnapshot{ - keys: keySet, - canonicalKey: key, - metadataHash: record.MetadataHash, - attrsHash: identitymap.HashMetadata(record.Attributes), - revision: resp.GetRevision(), - }, nil -} - -func (p *identityPublisher) deleteIdentityKeys(ctx context.Context, keys []string) error { - if p == nil || p.kvClient == nil || len(keys) == 0 { - return nil - } - - var deleteErr error - var deletedCount int - - for _, key := range keys { - if strings.TrimSpace(key) == "" { - continue - } - - _, err := p.kvClient.Delete(ctx, &proto.DeleteRequest{Key: key}) - if err != nil { - if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { - p.logger.Debug().Str("key", key).Msg("Stale identity key already removed from KV") - p.cache.delete(key) - continue - } - deleteErr = errors.Join(deleteErr, err) - continue - } - - deletedCount++ - p.cache.delete(key) - p.logger.Debug().Str("key", key).Msg("Deleted stale identity entry from KV") - } - - if deletedCount > 0 { - p.metrics.recordDelete(deletedCount) - } - - return deleteErr -} diff --git a/pkg/registry/identity_publisher_revision_bug_test.go b/pkg/registry/identity_publisher_revision_bug_test.go deleted file mode 100644 index 71acee28c..000000000 --- 
a/pkg/registry/identity_publisher_revision_bug_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package registry - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/carverauto/serviceradar/pkg/identitymap" - "github.com/carverauto/serviceradar/pkg/logger" - "github.com/carverauto/serviceradar/pkg/models" -) - -// This regression test ensures we do not cache a stale revision when the KV Update -// RPC omits a revision in its response. -func TestIdentityPublisherCachesStaleRevisionWithoutUpdateResponse(t *testing.T) { - t.Parallel() - - kvClient := newFakeIdentityKVClient() - pub := newIdentityPublisher(kvClient, identitymap.DefaultNamespace, 0, logger.NewTestLogger()) - - key := identitymap.Key{Kind: identitymap.KindDeviceID, Value: "device-revision"}.KeyPath(identitymap.DefaultNamespace) - - existingRecord := &identitymap.Record{ - CanonicalDeviceID: "device-revision", - MetadataHash: identitymap.HashIdentityMetadata(&models.DeviceUpdate{ - DeviceID: "device-revision", - Metadata: map[string]string{"armis_device_id": "armis-old"}, - }), - Attributes: map[string]string{"armis_device_id": "armis-old"}, - } - - payload, err := identitymap.MarshalRecord(existingRecord) - require.NoError(t, err) - - kvClient.entries[key] = &fakeKVEntry{value: payload, revision: 1} - kvClient.omitUpdateResp[key] = true - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - first := &models.DeviceUpdate{ - DeviceID: "device-revision", - Metadata: map[string]string{"armis_device_id": "armis-new"}, - } - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{first})) - - second := &models.DeviceUpdate{ - DeviceID: "device-revision", - Metadata: map[string]string{"armis_device_id": "armis-newer"}, - } - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{second})) - - require.Zero(t, kvClient.revisionMiss[key], "unexpected revision mismatch: stale revision cached after Update without response") -} diff --git a/pkg/registry/identity_publisher_test.go b/pkg/registry/identity_publisher_test.go deleted file mode 100644 index 733c440fb..000000000 --- a/pkg/registry/identity_publisher_test.go +++ /dev/null @@ -1,320 +0,0 @@ -package registry - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/carverauto/serviceradar/pkg/identitymap" - "github.com/carverauto/serviceradar/pkg/logger" - "github.com/carverauto/serviceradar/pkg/models" - "github.com/carverauto/serviceradar/proto" -) - -type fakeIdentityKVClient struct { - mu sync.Mutex - entries map[string]*fakeKVEntry - failUpdateOnce map[string]int - omitUpdateResp map[string]bool - updateCalls map[string]int - revisionMiss map[string]int - deleted map[string]int -} - -type fakeKVEntry struct { - value []byte - revision uint64 -} - -func newFakeIdentityKVClient() *fakeIdentityKVClient { - return &fakeIdentityKVClient{ - entries: make(map[string]*fakeKVEntry), - failUpdateOnce: make(map[string]int), - omitUpdateResp: make(map[string]bool), - updateCalls: make(map[string]int), - revisionMiss: make(map[string]int), - deleted: make(map[string]int), - } -} - -func (f *fakeIdentityKVClient) Get(_ context.Context, in *proto.GetRequest, _ ...grpc.CallOption) (*proto.GetResponse, error) { - f.mu.Lock() - defer f.mu.Unlock() - - entry, ok := f.entries[in.Key] - if !ok { - return &proto.GetResponse{Found: false}, nil - } - - valueCopy := 
append([]byte(nil), entry.value...) - return &proto.GetResponse{Value: valueCopy, Found: true, Revision: entry.revision}, nil -} - -func (f *fakeIdentityKVClient) PutIfAbsent(_ context.Context, in *proto.PutRequest, _ ...grpc.CallOption) (*proto.PutResponse, error) { - f.mu.Lock() - defer f.mu.Unlock() - - if _, exists := f.entries[in.Key]; exists { - return nil, status.Error(codes.AlreadyExists, "exists") - } - - f.entries[in.Key] = &fakeKVEntry{value: append([]byte(nil), in.Value...), revision: 1} - return &proto.PutResponse{}, nil -} - -func (f *fakeIdentityKVClient) Update(_ context.Context, in *proto.UpdateRequest, _ ...grpc.CallOption) (*proto.UpdateResponse, error) { - f.mu.Lock() - defer f.mu.Unlock() - - key := in.Key - f.updateCalls[key]++ - - if remaining, ok := f.failUpdateOnce[in.Key]; ok && remaining > 0 { - f.failUpdateOnce[in.Key] = remaining - 1 - return nil, status.Error(codes.Aborted, "conflict") - } - - entry, ok := f.entries[in.Key] - if !ok { - return nil, status.Error(codes.NotFound, "missing") - } - - if in.Revision != entry.revision { - f.revisionMiss[key]++ - return nil, status.Error(codes.Aborted, "revision mismatch") - } - - entry.revision++ - entry.value = append(entry.value[:0], in.Value...) - - if f.omitUpdateResp[key] { - return nil, nil - } - - return &proto.UpdateResponse{Revision: entry.revision}, nil -} - -func (f *fakeIdentityKVClient) Delete(_ context.Context, in *proto.DeleteRequest, _ ...grpc.CallOption) (*proto.DeleteResponse, error) { - f.mu.Lock() - defer f.mu.Unlock() - - if _, ok := f.entries[in.Key]; !ok { - return nil, status.Error(codes.NotFound, "missing") - } - - delete(f.entries, in.Key) - f.deleted[in.Key]++ - return &proto.DeleteResponse{}, nil -} - -func TestIdentityPublisherPublishesNewEntries(t *testing.T) { - t.Parallel() - - kvClient := newFakeIdentityKVClient() - pub := newIdentityPublisher(kvClient, identitymap.DefaultNamespace, 0, logger.NewTestLogger()) - - update := &models.DeviceUpdate{ - DeviceID: "device-1", - IP: "10.0.0.10", - Partition: "tenant-a", - Metadata: map[string]string{ - "armis_device_id": "armis-1", - }, - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{update})) - - expectedKeys := identitymap.BuildKeys(update) - require.Len(t, kvClient.entries, len(expectedKeys)) - require.Equal(t, int64(len(expectedKeys)), pub.metrics.publishedKeys.Load()) - - for _, key := range expectedKeys { - entry, ok := kvClient.entries[key.KeyPath(identitymap.DefaultNamespace)] - require.Truef(t, ok, "missing key %s", key) - - record, err := identitymap.UnmarshalRecord(entry.value) - require.NoError(t, err) - require.Equal(t, update.DeviceID, record.CanonicalDeviceID) - require.Equal(t, identitymap.HashIdentityMetadata(update), record.MetadataHash) - require.Equal(t, "armis-1", record.Attributes["armis_device_id"]) - } -} - -func TestIdentityPublisherSkipsUnchangedRecords(t *testing.T) { - t.Parallel() - - kvClient := newFakeIdentityKVClient() - pub := newIdentityPublisher(kvClient, identitymap.DefaultNamespace, 0, logger.NewTestLogger()) - - update := &models.DeviceUpdate{ - DeviceID: "device-static", - Metadata: map[string]string{"armis_device_id": "armis-static"}, - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{update})) - - initialKeys := len(kvClient.entries) - initialWrites := pub.metrics.publishedKeys.Load() - - 
require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{update})) - - for _, entry := range kvClient.entries { - require.Equal(t, uint64(1), entry.revision) - } - require.Len(t, kvClient.entries, initialKeys) - require.Equal(t, initialWrites, pub.metrics.publishedKeys.Load()) -} - -func TestIdentityPublisherRetriesOnCASConflict(t *testing.T) { - t.Parallel() - - kvClient := newFakeIdentityKVClient() - pub := newIdentityPublisher(kvClient, identitymap.DefaultNamespace, 0, logger.NewTestLogger()) - - initial := &models.DeviceUpdate{DeviceID: "device-conflict"} - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{initial})) - - conflictKey := identitymap.Key{Kind: identitymap.KindDeviceID, Value: initial.DeviceID}.KeyPath(identitymap.DefaultNamespace) - kvClient.failUpdateOnce[conflictKey] = 1 - - updated := &models.DeviceUpdate{ - DeviceID: initial.DeviceID, - Metadata: map[string]string{"armis_device_id": "armis-new"}, - } - - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{updated})) - - entry := kvClient.entries[conflictKey] - require.Equal(t, uint64(2), entry.revision) - record, err := identitymap.UnmarshalRecord(entry.value) - require.NoError(t, err) - require.Equal(t, identitymap.HashIdentityMetadata(updated), record.MetadataHash) -} - -func TestIdentityPublisherRecoversFromCorruptedRecord(t *testing.T) { - t.Parallel() - - kvClient := newFakeIdentityKVClient() - pub := newIdentityPublisher(kvClient, identitymap.DefaultNamespace, 0, logger.NewTestLogger()) - - deviceID := "device-corrupted" - canonicalKey := identitymap.Key{Kind: identitymap.KindDeviceID, Value: deviceID}.KeyPath(identitymap.DefaultNamespace) - kvClient.entries[canonicalKey] = &fakeKVEntry{ - value: []byte("totally-not-a-proto"), - revision: 3, - } - - update := &models.DeviceUpdate{ - DeviceID: deviceID, - Partition: "tenant-corrupt", - IP: "10.99.0.18", - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - err := pub.Publish(ctx, []*models.DeviceUpdate{update}) - require.NoError(t, err) - - entry, ok := kvClient.entries[canonicalKey] - require.True(t, ok, "canonical key missing after publish") - - record, err := identitymap.UnmarshalRecord(entry.value) - require.NoError(t, err) - require.Equal(t, update.DeviceID, record.CanonicalDeviceID) - require.Equal(t, update.Partition, record.Partition) - require.Equal(t, identitymap.HashIdentityMetadata(update), record.MetadataHash) - require.NotZero(t, record.UpdatedAt.UnixMilli()) - require.Equal(t, "10.99.0.18", record.Attributes["ip"]) -} - -func TestIdentityPublisherUpdatesWhenAttributesChange(t *testing.T) { - t.Parallel() - - kvClient := newFakeIdentityKVClient() - pub := newIdentityPublisher(kvClient, identitymap.DefaultNamespace, 0, logger.NewTestLogger()) - - metadata := map[string]string{"armis_device_id": "armis-attr"} - initialRecord := &identitymap.Record{ - CanonicalDeviceID: "device-attr", - MetadataHash: identitymap.HashIdentityMetadata(&models.DeviceUpdate{ - DeviceID: "device-attr", - Metadata: metadata, - }), - Attributes: map[string]string{}, - } - payload, err := identitymap.MarshalRecord(initialRecord) - require.NoError(t, err) - - key := identitymap.Key{Kind: identitymap.KindDeviceID, Value: "device-attr"}.KeyPath(identitymap.DefaultNamespace) - kvClient.entries[key] = &fakeKVEntry{value: payload, revision: 1} - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer 
cancel() - - update := &models.DeviceUpdate{DeviceID: "device-attr", Metadata: metadata} - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{update})) - - entry := kvClient.entries[key] - require.Equal(t, uint64(2), entry.revision) - record, err := identitymap.UnmarshalRecord(entry.value) - require.NoError(t, err) - require.Equal(t, "armis-attr", record.Attributes["armis_device_id"]) -} - -func TestIdentityPublisherDeletesStaleIdentityKeys(t *testing.T) { - t.Parallel() - - kvClient := newFakeIdentityKVClient() - pub := newIdentityPublisher(kvClient, identitymap.DefaultNamespace, 0, logger.NewTestLogger()) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - initial := &models.DeviceUpdate{ - DeviceID: "device-stale", - IP: "10.0.0.10", - Partition: "tenant-a", - } - - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{initial})) - - initialKeys := identitymap.BuildKeys(initial) - ipKey := identitymap.Key{Kind: identitymap.KindIP, Value: initial.IP}.KeyPath(identitymap.DefaultNamespace) - partIPKey := identitymap.Key{Kind: identitymap.KindPartitionIP, Value: "tenant-a:10.0.0.10"}.KeyPath(identitymap.DefaultNamespace) - require.Contains(t, kvClient.entries, ipKey) - require.Contains(t, kvClient.entries, partIPKey) - require.Len(t, kvClient.entries, len(initialKeys)) - - updated := &models.DeviceUpdate{ - DeviceID: initial.DeviceID, - IP: "10.0.0.11", - Partition: initial.Partition, - } - - require.NoError(t, pub.Publish(ctx, []*models.DeviceUpdate{updated})) - - _, ipStillPresent := kvClient.entries[ipKey] - _, partIPStillPresent := kvClient.entries[partIPKey] - require.False(t, ipStillPresent, "expected old IP key to be deleted") - require.False(t, partIPStillPresent, "expected old partition IP key to be deleted") - - newKeys := identitymap.BuildKeys(updated) - require.Len(t, kvClient.entries, len(newKeys)) - require.Equal(t, int64(2), pub.metrics.deletedKeys.Load()) -} diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index af39744a6..547581fe5 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -65,7 +65,6 @@ type DeviceRegistry struct { db db.Service logger logger.Logger identityEngine *IdentityEngine - identityPublisher *identityPublisher firstSeenLookupChunkSize int identityCfg *models.IdentityReconciliationConfig graphWriter GraphWriter @@ -265,11 +264,6 @@ func (r *DeviceRegistry) ProcessBatchDeviceUpdates(ctx context.Context, updates } } - // Publish identity map for downstream consumers - if len(valid) > 0 { - r.publishIdentityMap(ctx, valid) - } - // Annotate first_seen timestamps if err := r.annotateFirstSeen(ctx, valid); err != nil { r.logger.Warn().Err(err).Msg("Failed to annotate _first_seen metadata") diff --git a/pkg/registry/registry_test.go b/pkg/registry/registry_test.go index 50afc8918..ab1560902 100644 --- a/pkg/registry/registry_test.go +++ b/pkg/registry/registry_test.go @@ -47,7 +47,7 @@ func allowCanonicalizationQueries(mockDB *db.MockService) { AnyTimes() // IdentityEngine calls BatchGetDeviceIDsByIdentifier to resolve strong identifiers mockDB.EXPECT(). - BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any()). + BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(nil, nil). AnyTimes() // IdentityEngine calls UpsertDeviceIdentifiers to persist identifier mappings @@ -1285,7 +1285,7 @@ func TestProcessBatchDeviceUpdates_MergesSweepIntoCanonicalDevice(t *testing.T) // IdentityEngine mocks mockDB.EXPECT(). 
- BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any()). + BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(nil, nil). AnyTimes() mockDB.EXPECT(). @@ -1345,7 +1345,7 @@ func TestReconcileSightingsMergesSweepSightingsByIP(t *testing.T) { // IdentityEngine mocks mockDB.EXPECT(). - BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any()). + BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(nil, nil). AnyTimes() mockDB.EXPECT(). @@ -1451,7 +1451,7 @@ func TestReconcileSightingsPromotesEligibleSightings(t *testing.T) { Return([]*models.UnifiedDevice{}, nil). AnyTimes() mockDB.EXPECT(). - BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any()). + BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(nil, nil). AnyTimes() @@ -1732,7 +1732,7 @@ func TestProcessBatchDeviceUpdates_SweepAttachedToCanonicalGetsMetadata(t *testi AnyTimes() mockDB.EXPECT(). - BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any()). + BatchGetDeviceIDsByIdentifier(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(nil, nil). AnyTimes() diff --git a/pkg/sweeper/base_processor.go b/pkg/sweeper/base_processor.go index 092f51b05..14e4af40f 100644 --- a/pkg/sweeper/base_processor.go +++ b/pkg/sweeper/base_processor.go @@ -301,7 +301,7 @@ func (p *BaseProcessor) Process(result *models.Result) error { case models.ModeTCP: p.processTCPResult(shard, host, result) - + case models.ModeTCPConnect: p.processTCPResult(shard, host, result) } @@ -439,7 +439,7 @@ func (p *BaseProcessor) collectShardSummaries() []shardSummary { summary.icmpHosts++ } - summary.hosts = append(summary.hosts, *host) + summary.hosts = append(summary.hosts, models.DeepCopyHostResult(host)) } summary.totalHosts = len(shard.hostMap) @@ -789,7 +789,7 @@ func (p *BaseProcessor) processShardForSummary( // Stream host data and count for _, host := range shard.hostMap { select { - case hostCh <- *host: + case hostCh <- models.DeepCopyHostResult(host): if host.Available { result.availableHosts++ } diff --git a/pkg/sweeper/memory_store.go b/pkg/sweeper/memory_store.go index ca958367b..611fddfa4 100644 --- a/pkg/sweeper/memory_store.go +++ b/pkg/sweeper/memory_store.go @@ -89,14 +89,14 @@ func WithoutPreallocation() InMemoryStoreOption { // InMemoryStore implements Store interface for temporary storage. type InMemoryStore struct { // Sharded to reduce lock contention under high write rates - shards []*storeShard - shardCount int - processor ResultProcessor - maxResults int - cleanupDone chan struct{} - lastCleanup time.Time + shards []*storeShard + shardCount int + processor ResultProcessor + maxResults int + cleanupDone chan struct{} + lastCleanup time.Time cleanupInterval time.Duration - logger logger.Logger + logger logger.Logger } // storeShard holds a partition of results and its own lock. @@ -134,13 +134,13 @@ func NewInMemoryStore(processor ResultProcessor, log logger.Logger, opts ...InMe } s := &InMemoryStore{ - shards: make([]*storeShard, shards), - shardCount: shards, - processor: processor, - maxResults: cfg.maxResults, - cleanupDone: cleanupChan, + shards: make([]*storeShard, shards), + shardCount: shards, + processor: processor, + maxResults: cfg.maxResults, + cleanupDone: cleanupChan, cleanupInterval: cfg.cleanupInterval, - logger: log, + logger: log, } // Pre-allocate per-shard capacity to reduce growslice. 
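The switch from appending `*host` to `models.DeepCopyHostResult(host)` in the sweeper (here and in the memory-store hunks that follow) exists because a plain struct copy still shares pointer-backed fields such as the port map and port results with the live host map. A minimal, self-contained sketch of that aliasing problem, using a simplified stand-in struct rather than the real `models.HostResult`:

```go
package main

import "fmt"

// hostResult is a simplified stand-in: a value copy of the struct still
// shares the PortMap map with the original, which is the aliasing the diff removes.
type hostResult struct {
	Host    string
	PortMap map[int]bool
}

// deepCopy returns a snapshot whose map is independent of the original.
func deepCopy(h *hostResult) hostResult {
	cp := *h // copies scalar fields only
	cp.PortMap = make(map[int]bool, len(h.PortMap))
	for port, open := range h.PortMap {
		cp.PortMap[port] = open
	}
	return cp
}

func main() {
	orig := &hostResult{Host: "192.168.1.1", PortMap: map[int]bool{80: true}}

	shallow := *orig       // aliases orig.PortMap
	deep := deepCopy(orig) // independent snapshot

	orig.PortMap[443] = true // stands in for a concurrent writer mutating shard state

	fmt.Println(len(shallow.PortMap)) // 2 — the shallow copy sees the later write
	fmt.Println(len(deep.PortMap))    // 1 — the deep copy is a stable snapshot
}
```

The new `summary_snapshot_test.go` further down exercises exactly this property against the real processor: snapshot pointers must not be shared with internal state, and concurrent `Process` calls must not race against a previously returned summary.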
@@ -405,7 +405,7 @@ func (*InMemoryStore) updateHostTimestamps(host *models.HostResult, r *models.Re func (*InMemoryStore) convertToSlice(hostMap map[string]*models.HostResult) []models.HostResult { hosts := make([]models.HostResult, 0, len(hostMap)) for _, host := range hostMap { - hosts = append(hosts, *host) + hosts = append(hosts, models.DeepCopyHostResult(host)) } return hosts @@ -508,7 +508,7 @@ func (*InMemoryStore) buildSummary( availableHosts++ } - hosts = append(hosts, *host) + hosts = append(hosts, models.DeepCopyHostResult(host)) } ports := make([]models.PortCount, 0, len(portCounts)) diff --git a/pkg/sweeper/summary_snapshot_test.go b/pkg/sweeper/summary_snapshot_test.go new file mode 100644 index 000000000..67853f218 --- /dev/null +++ b/pkg/sweeper/summary_snapshot_test.go @@ -0,0 +1,157 @@ +package sweeper + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/carverauto/serviceradar/pkg/logger" + "github.com/carverauto/serviceradar/pkg/models" +) + +func TestGetSummary_HostResultsDoNotAliasInternalState(t *testing.T) { + cfg := &models.Config{Ports: []int{80}} + processor := NewBaseProcessor(cfg, logger.NewTestLogger()) + + hostAddr := "192.168.1.1" + + if err := processor.Process(&models.Result{ + Target: models.Target{Host: hostAddr, Mode: models.ModeICMP}, + Available: true, + RespTime: 10 * time.Millisecond, + }); err != nil { + t.Fatalf("process icmp: %v", err) + } + + if err := processor.Process(&models.Result{ + Target: models.Target{Host: hostAddr, Port: 80, Mode: models.ModeTCP}, + Available: true, + RespTime: 5 * time.Millisecond, + }); err != nil { + t.Fatalf("process tcp: %v", err) + } + + internal := processor.GetHostMap()[hostAddr] + if internal == nil { + t.Fatalf("expected internal host to exist") + } + if internal.ICMPStatus == nil || internal.PortMap == nil || len(internal.PortResults) == 0 { + t.Fatalf("expected internal host to have ICMPStatus and port state") + } + + summary, err := processor.GetSummary(context.Background()) + if err != nil { + t.Fatalf("get summary: %v", err) + } + if len(summary.Hosts) != 1 { + t.Fatalf("expected 1 host in summary, got %d", len(summary.Hosts)) + } + + snapshot := summary.Hosts[0] + + if snapshot.ICMPStatus == nil { + t.Fatalf("expected snapshot ICMPStatus to be non-nil") + } + if snapshot.ICMPStatus == internal.ICMPStatus { + t.Fatalf("expected ICMPStatus to be deep-copied") + } + + if snapshot.PortMap == nil { + t.Fatalf("expected snapshot PortMap to be non-nil") + } + if len(snapshot.PortResults) == 0 { + t.Fatalf("expected snapshot PortResults to be non-empty") + } + + for port, internalPR := range internal.PortMap { + snapshotPR := snapshot.PortMap[port] + if internalPR == nil || snapshotPR == nil { + continue + } + if internalPR == snapshotPR { + t.Fatalf("expected PortMap[%d] to be deep-copied", port) + } + } + + internalPointers := make(map[*models.PortResult]struct{}, len(internal.PortResults)) + for _, pr := range internal.PortResults { + internalPointers[pr] = struct{}{} + } + for _, pr := range snapshot.PortResults { + if _, ok := internalPointers[pr]; ok { + t.Fatalf("expected PortResults to be deep-copied (found shared pointer)") + } + } + + for _, pr := range snapshot.PortResults { + if pr == nil { + continue + } + if got := snapshot.PortMap[pr.Port]; got != pr { + t.Fatalf("expected snapshot PortMap[%d] to reference the same PortResult pointer as PortResults", pr.Port) + } + } +} + +func TestGetSummary_ConcurrentReadsDoNotPanic(t *testing.T) { + cfg := &models.Config{Ports: []int{80}} + 
processor := NewBaseProcessor(cfg, logger.NewTestLogger()) + + hostAddr := "192.168.1.1" + + if err := processor.Process(&models.Result{ + Target: models.Target{Host: hostAddr, Port: 80, Mode: models.ModeTCP}, + Available: true, + RespTime: 5 * time.Millisecond, + }); err != nil { + t.Fatalf("process tcp: %v", err) + } + + summary, err := processor.GetSummary(context.Background()) + if err != nil { + t.Fatalf("get summary: %v", err) + } + if len(summary.Hosts) != 1 { + t.Fatalf("expected 1 host in summary, got %d", len(summary.Hosts)) + } + snapshot := summary.Hosts[0] + + stop := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-stop: + return + default: + } + + if snapshot.PortMap != nil { + _ = snapshot.PortMap[80] + _ = len(snapshot.PortMap) + } + + if len(snapshot.PortResults) > 0 && snapshot.PortResults[0] != nil { + _ = snapshot.PortResults[0].Port + } + } + }() + + for i := 0; i < 250; i++ { + if err := processor.Process(&models.Result{ + Target: models.Target{Host: hostAddr, Port: 10000 + i, Mode: models.ModeTCP}, + Available: true, + RespTime: 1 * time.Millisecond, + }); err != nil { + close(stop) + wg.Wait() + t.Fatalf("process tcp iteration %d: %v", i, err) + } + } + + close(stop) + wg.Wait() +} diff --git a/pkg/sync/integrations/armis/armis_test.go b/pkg/sync/integrations/armis/armis_test.go index 48e59aa2c..1483099df 100644 --- a/pkg/sync/integrations/armis/armis_test.go +++ b/pkg/sync/integrations/armis/armis_test.go @@ -33,7 +33,6 @@ import ( "go.uber.org/mock/gomock" "google.golang.org/grpc" - "github.com/carverauto/serviceradar/pkg/identitymap" "github.com/carverauto/serviceradar/pkg/logger" "github.com/carverauto/serviceradar/pkg/models" "github.com/carverauto/serviceradar/proto" @@ -1206,16 +1205,14 @@ func (*fakeKVClient) ListKeys(context.Context, *proto.ListKeysRequest, ...grpc.C } func TestProcessDevices_DoesNotHydrateCanonicalMetadata(t *testing.T) { - canonical := &identitymap.Record{CanonicalDeviceID: "canon-99", Partition: "prod", MetadataHash: "hash"} - payload, err := identitymap.MarshalRecord(canonical) - require.NoError(t, err) - fake := &fakeKVClient{ getFn: func(ctx context.Context, req *proto.GetRequest, _ ...grpc.CallOption) (*proto.GetResponse, error) { - if strings.Contains(req.Key, "/armis-id/1") { - return &proto.GetResponse{Value: payload, Found: true, Revision: 11}, nil - } - return &proto.GetResponse{Found: false}, nil + t.Fatalf("processDevices should not issue KV identity lookups (Get): %s", req.GetKey()) + return nil, nil + }, + batchGetFn: func(ctx context.Context, req *proto.BatchGetRequest, _ ...grpc.CallOption) (*proto.BatchGetResponse, error) { + t.Fatalf("processDevices should not issue KV identity lookups (BatchGet): %v", req.GetKeys()) + return nil, nil }, } @@ -1240,45 +1237,6 @@ func TestProcessDevices_DoesNotHydrateCanonicalMetadata(t *testing.T) { require.Empty(t, data) } -func TestProcessDevices_SkipsCanonicalPrefetch(t *testing.T) { - fake := &fakeKVClient{} - var captured [][]string - fake.batchGetFn = func(ctx context.Context, req *proto.BatchGetRequest, _ ...grpc.CallOption) (*proto.BatchGetResponse, error) { - keys := append([]string(nil), req.GetKeys()...) 
- captured = append(captured, keys) - resp := &proto.BatchGetResponse{Results: make([]*proto.BatchGetEntry, len(keys))} - for i, key := range keys { - resp.Results[i] = &proto.BatchGetEntry{Key: key, Found: false} - } - return resp, nil - } - - integ := &ArmisIntegration{ - Config: &models.SourceConfig{AgentID: "agent", PollerID: "poller", Partition: "part"}, - KVClient: fake, - Logger: logger.NewTestLogger(), - } - - devices := []Device{ - {ID: 1, IPAddress: "10.0.0.1,10.0.0.2", MacAddress: "aa:bb:cc:dd:ee:ff", Name: "dev1"}, - {ID: 2, IPAddress: "10.0.1.1", MacAddress: "11:22:33:44:55:66", Name: "dev2"}, - } - - labels := map[int]string{ - 1: "query1", - 2: "query2", - } - - queries := map[int]models.QueryConfig{ - 1: {Label: "query1", SweepModes: []models.SweepMode{models.ModeTCP}}, - 2: {Label: "query2", SweepModes: []models.SweepMode{models.ModeICMP}}, - } - - _, _, events, _ := integ.processDevices(context.Background(), devices, labels, queries) - require.Len(t, events, 2) - require.Empty(t, captured, "sync should not issue KV BatchGet calls for canonical prefetch") -} - func TestPrepareArmisUpdateFromDeviceStates(t *testing.T) { integ := &ArmisIntegration{Logger: logger.NewTestLogger()} states := []DeviceState{ diff --git a/pkg/sync/integrations/armis/devices.go b/pkg/sync/integrations/armis/devices.go index 1f36930e8..0f112b2fd 100644 --- a/pkg/sync/integrations/armis/devices.go +++ b/pkg/sync/integrations/armis/devices.go @@ -30,7 +30,6 @@ import ( "strings" "time" - "github.com/carverauto/serviceradar/pkg/identitymap" "github.com/carverauto/serviceradar/pkg/models" "github.com/carverauto/serviceradar/proto" ) @@ -860,26 +859,20 @@ func (d *DefaultArmisIntegration) FetchDevicesPage( // processDevices converts devices to KV data and extracts IPs. 
type armisDeviceContext struct { - device *Device - label string - query models.QueryConfig - primaryIP string - allIPs []string - event *models.DeviceUpdate - keys []identitymap.Key - orderedKeys []identitymap.Key + device *Device + label string + query models.QueryConfig + primaryIP string + allIPs []string + event *models.DeviceUpdate } func (a *ArmisIntegration) processDevices( - ctx context.Context, + _ context.Context, devices []Device, deviceLabels map[int]string, deviceQueries map[int]models.QueryConfig, ) (data map[string][]byte, ips []string, events []*models.DeviceUpdate, deviceTargets []models.DeviceTarget) { - if ctx == nil { - ctx = context.Background() - } - data = make(map[string][]byte) ips = make([]string, 0, len(devices)*2) // Allocate more space for multiple IPs per device events = make([]*models.DeviceUpdate, 0, len(devices)) @@ -902,46 +895,20 @@ func (a *ArmisIntegration) processDevices( primaryIP := allIPs[0] event := a.createDeviceUpdateEventWithAllIPs(d, primaryIP, allIPs, deviceLabels[d.ID], now) - keys := identitymap.BuildKeys(event) - ordered := identitymap.PrioritizeKeys(keys) contexts = append(contexts, armisDeviceContext{ - device: d, - label: deviceLabels[d.ID], - query: deviceQueries[d.ID], - primaryIP: primaryIP, - allIPs: allIPs, - event: event, - keys: keys, - orderedKeys: ordered, + device: d, + label: deviceLabels[d.ID], + query: deviceQueries[d.ID], + primaryIP: primaryIP, + allIPs: allIPs, + event: event, }) ips = append(ips, primaryIP+"/32") } - entries, fetchErr := a.prefetchCanonicalEntries(ctx, contexts) - for _, ctxDevice := range contexts { - var ( - canonicalRecord *identitymap.Record - revision uint64 - ) - - if len(entries) != 0 { - canonicalRecord, revision = a.resolveCanonicalRecord(entries, ctxDevice.orderedKeys) - } - - if canonicalRecord == nil && fetchErr != nil { - if rec, rev := a.lookupCanonicalRecordDirect(ctx, ctxDevice.keys); rec != nil { - canonicalRecord = rec - revision = rev - } - } - - if canonicalRecord != nil { - a.attachCanonicalMetadata(ctxDevice.event, canonicalRecord, revision) - } - events = append(events, ctxDevice.event) deviceTarget := models.DeviceTarget{ @@ -961,22 +928,6 @@ func (a *ArmisIntegration) processDevices( }, } - if canonicalRecord != nil { - if deviceTarget.Metadata == nil { - deviceTarget.Metadata = make(map[string]string) - } - deviceTarget.Metadata["canonical_device_id"] = canonicalRecord.CanonicalDeviceID - if canonicalRecord.Partition != "" { - deviceTarget.Metadata["canonical_partition"] = canonicalRecord.Partition - } - if canonicalRecord.MetadataHash != "" { - deviceTarget.Metadata["canonical_metadata_hash"] = canonicalRecord.MetadataHash - } - if revision != 0 { - deviceTarget.Metadata["canonical_revision"] = strconv.FormatUint(revision, 10) - } - } - deviceTargets = append(deviceTargets, deviceTarget) a.Logger.Debug(). @@ -990,113 +941,6 @@ func (a *ArmisIntegration) processDevices( return data, ips, events, deviceTargets } -func (a *ArmisIntegration) prefetchCanonicalEntries(ctx context.Context, contexts []armisDeviceContext) (map[string]*proto.BatchGetEntry, error) { - // Canonical identity resolution now happens centrally in the core registry. - // The sync service should avoid hammering the KV store with speculative reads. 
- return nil, nil -} - -func (a *ArmisIntegration) resolveCanonicalRecord(entries map[string]*proto.BatchGetEntry, ordered []identitymap.Key) (*identitymap.Record, uint64) { - if len(entries) == 0 || len(ordered) == 0 { - return nil, 0 - } - - for _, key := range ordered { - sanitized := key.KeyPath(identitymap.DefaultNamespace) - entry, ok := entries[sanitized] - if !ok || !entry.GetFound() || len(entry.GetValue()) == 0 { - continue - } - - record, err := identitymap.UnmarshalRecord(entry.GetValue()) - if err != nil { - a.Logger.Debug(). - Err(err). - Str("identity_kind", key.Kind.String()). - Str("identity_value", key.Value). - Msg("Failed to unmarshal canonical identity record") - continue - } - - a.Logger.Debug(). - Str("identity_kind", key.Kind.String()). - Str("identity_value", key.Value). - Str("canonical_device_id", record.CanonicalDeviceID). - Msg("Resolved canonical identity for Armis device") - - return record, entry.GetRevision() - } - - return nil, 0 -} - -func (a *ArmisIntegration) lookupCanonicalRecordDirect(ctx context.Context, keys []identitymap.Key) (*identitymap.Record, uint64) { - if a.KVClient == nil || len(keys) == 0 { - return nil, 0 - } - - if ctx == nil { - ctx = context.Background() - } - - ordered := identitymap.PrioritizeKeys(keys) - if len(ordered) == 0 { - return nil, 0 - } - - seenPaths := make(map[string]struct{}, len(ordered)) - paths := make([]string, 0, len(ordered)) - for _, key := range ordered { - sanitized := key.KeyPath(identitymap.DefaultNamespace) - if _, ok := seenPaths[sanitized]; ok { - continue - } - seenPaths[sanitized] = struct{}{} - paths = append(paths, sanitized) - } - - resp, err := a.KVClient.BatchGet(ctx, &proto.BatchGetRequest{Keys: paths}) - if err != nil { - a.Logger.Debug().Err(err).Msg("Armis identity map lookup failed") - return nil, 0 - } - - entries := make(map[string]*proto.BatchGetEntry, len(resp.GetResults())) - for _, entry := range resp.GetResults() { - if entry == nil { - continue - } - entries[entry.GetKey()] = entry - } - - return a.resolveCanonicalRecord(entries, ordered) -} - -func (a *ArmisIntegration) attachCanonicalMetadata(event *models.DeviceUpdate, record *identitymap.Record, revision uint64) { - if event == nil || record == nil { - return - } - - if event.Metadata == nil { - event.Metadata = make(map[string]string) - } - - event.Metadata["canonical_device_id"] = record.CanonicalDeviceID - if record.Partition != "" { - event.Metadata["canonical_partition"] = record.Partition - } - if record.MetadataHash != "" { - event.Metadata["canonical_metadata_hash"] = record.MetadataHash - } - if revision != 0 { - event.Metadata["canonical_revision"] = strconv.FormatUint(revision, 10) - } - - if hostname, ok := record.Attributes["hostname"]; ok && hostname != "" { - event.Metadata["canonical_hostname"] = hostname - } -} - // createDeviceUpdateEventWithAllIPs creates a DeviceUpdate event with all IP addresses in metadata func (a *ArmisIntegration) createDeviceUpdateEventWithAllIPs( d *Device, primaryIP string, allIPs []string, queryLabel string, timestamp time.Time, diff --git a/pkg/sync/integrations/netbox/netbox.go b/pkg/sync/integrations/netbox/netbox.go index 9a018fbe8..a4a262dfb 100644 --- a/pkg/sync/integrations/netbox/netbox.go +++ b/pkg/sync/integrations/netbox/netbox.go @@ -28,7 +28,6 @@ import ( "strings" "time" - "github.com/carverauto/serviceradar/pkg/identitymap" "github.com/carverauto/serviceradar/pkg/models" "github.com/carverauto/serviceradar/proto" ) @@ -297,22 +296,16 @@ func (*NetboxIntegration) 
decodeResponse(resp *http.Response) (DeviceResponse, e // processDevices converts devices to KV data, extracts IPs, and returns the list of devices. type netboxDeviceContext struct { - device *Device - event *models.DeviceUpdate - keys []identitymap.Key - orderedKeys []identitymap.Key - network string + device *Device + event *models.DeviceUpdate + network string } -func (n *NetboxIntegration) processDevices(ctx context.Context, deviceResp DeviceResponse) ( +func (n *NetboxIntegration) processDevices(_ context.Context, deviceResp DeviceResponse) ( data map[string][]byte, ips []string, events []*models.DeviceUpdate, ) { - if ctx == nil { - ctx = context.Background() - } - data = make(map[string][]byte) ips = make([]string, 0, len(deviceResp.Results)) events = make([]*models.DeviceUpdate, 0, len(deviceResp.Results)) @@ -385,41 +378,14 @@ func (n *NetboxIntegration) processDevices(ctx context.Context, deviceResp Devic } } - keys := identitymap.BuildKeys(event) - ordered := identitymap.PrioritizeKeys(keys) - contexts = append(contexts, netboxDeviceContext{ - device: device, - event: event, - keys: keys, - orderedKeys: ordered, - network: network, + device: device, + event: event, + network: network, }) } - entries, fetchErr := n.prefetchCanonicalEntries(ctx, contexts) - for _, ctxDevice := range contexts { - var ( - record *identitymap.Record - revision uint64 - ) - - if len(entries) != 0 { - record, revision = n.resolveCanonicalRecord(entries, ctxDevice.orderedKeys) - } - - if record == nil && fetchErr != nil { - if fallback, rev := n.lookupCanonicalRecordDirect(ctx, ctxDevice.keys); fallback != nil { - record = fallback - revision = rev - } - } - - if record != nil { - n.attachCanonicalMetadata(ctxDevice.event, record, revision) - } - metaJSON, err := json.Marshal(ctxDevice.event.Metadata) if err == nil { n.Logger.Debug(). @@ -433,112 +399,6 @@ func (n *NetboxIntegration) processDevices(ctx context.Context, deviceResp Devic return data, ips, events } -func (n *NetboxIntegration) prefetchCanonicalEntries(ctx context.Context, contexts []netboxDeviceContext) (map[string]*proto.BatchGetEntry, error) { - // Canonical identity resolution is performed by the core registry. Avoid KV reads here. - return nil, nil -} - -func (n *NetboxIntegration) resolveCanonicalRecord(entries map[string]*proto.BatchGetEntry, ordered []identitymap.Key) (*identitymap.Record, uint64) { - if len(entries) == 0 || len(ordered) == 0 { - return nil, 0 - } - - for _, key := range ordered { - sanitized := key.KeyPath(identitymap.DefaultNamespace) - entry, ok := entries[sanitized] - if !ok || !entry.GetFound() || len(entry.GetValue()) == 0 { - continue - } - - record, err := identitymap.UnmarshalRecord(entry.GetValue()) - if err != nil { - n.Logger.Debug(). - Err(err). - Str("identity_kind", key.Kind.String()). - Str("identity_value", key.Value). - Msg("Failed to unmarshal canonical identity record") - continue - } - - n.Logger.Debug(). - Str("identity_kind", key.Kind.String()). - Str("identity_value", key.Value). - Str("canonical_device_id", record.CanonicalDeviceID). 
- Msg("Resolved canonical identity for NetBox device") - - return record, entry.GetRevision() - } - - return nil, 0 -} - -func (n *NetboxIntegration) lookupCanonicalRecordDirect(ctx context.Context, keys []identitymap.Key) (*identitymap.Record, uint64) { - if n.KvClient == nil || len(keys) == 0 { - return nil, 0 - } - - if ctx == nil { - ctx = context.Background() - } - - ordered := identitymap.PrioritizeKeys(keys) - if len(ordered) == 0 { - return nil, 0 - } - - seenPaths := make(map[string]struct{}, len(ordered)) - paths := make([]string, 0, len(ordered)) - for _, key := range ordered { - sanitized := key.KeyPath(identitymap.DefaultNamespace) - if _, ok := seenPaths[sanitized]; ok { - continue - } - seenPaths[sanitized] = struct{}{} - paths = append(paths, sanitized) - } - - resp, err := n.KvClient.BatchGet(ctx, &proto.BatchGetRequest{Keys: paths}) - if err != nil { - n.Logger.Debug().Err(err).Msg("NetBox identity map lookup failed") - return nil, 0 - } - - entries := make(map[string]*proto.BatchGetEntry, len(resp.GetResults())) - for _, entry := range resp.GetResults() { - if entry == nil { - continue - } - entries[entry.GetKey()] = entry - } - - return n.resolveCanonicalRecord(entries, ordered) -} - -func (n *NetboxIntegration) attachCanonicalMetadata(event *models.DeviceUpdate, record *identitymap.Record, revision uint64) { - if event == nil || record == nil { - return - } - - if event.Metadata == nil { - event.Metadata = make(map[string]string) - } - - event.Metadata["canonical_device_id"] = record.CanonicalDeviceID - if record.Partition != "" { - event.Metadata["canonical_partition"] = record.Partition - } - if record.MetadataHash != "" { - event.Metadata["canonical_metadata_hash"] = record.MetadataHash - } - if revision != 0 { - event.Metadata["canonical_revision"] = strconv.FormatUint(revision, 10) - } - - if hostname, ok := record.Attributes["hostname"]; ok && hostname != "" { - event.Metadata["canonical_hostname"] = hostname - } -} - // writeSweepConfig generates and writes the sweep Config to KV. 
func (n *NetboxIntegration) writeSweepConfig(ctx context.Context, ips []string) { if n.KvClient == nil { diff --git a/pkg/sync/integrations/netbox/netbox_test.go b/pkg/sync/integrations/netbox/netbox_test.go index 1a85517dd..339a8b782 100644 --- a/pkg/sync/integrations/netbox/netbox_test.go +++ b/pkg/sync/integrations/netbox/netbox_test.go @@ -2,13 +2,11 @@ package netbox import ( "context" - "strings" "testing" "github.com/stretchr/testify/require" "google.golang.org/grpc" - "github.com/carverauto/serviceradar/pkg/identitymap" "github.com/carverauto/serviceradar/pkg/logger" "github.com/carverauto/serviceradar/pkg/models" "github.com/carverauto/serviceradar/proto" @@ -118,16 +116,14 @@ func (*fakeKVClient) ListKeys(context.Context, *proto.ListKeysRequest, ...grpc.C } func TestProcessDevices_DoesNotHydrateCanonicalMetadata(t *testing.T) { - canonical := &identitymap.Record{CanonicalDeviceID: "canonical-42", Partition: "prod", MetadataHash: "hash"} - payload, err := identitymap.MarshalRecord(canonical) - require.NoError(t, err) - fake := &fakeKVClient{ getFn: func(ctx context.Context, req *proto.GetRequest, _ ...grpc.CallOption) (*proto.GetResponse, error) { - if strings.Contains(req.Key, "/netbox-id/1") { - return &proto.GetResponse{Value: payload, Found: true, Revision: 7}, nil - } - return &proto.GetResponse{Found: false}, nil + t.Fatalf("processDevices should not issue KV identity lookups (Get): %s", req.GetKey()) + return nil, nil + }, + batchGetFn: func(ctx context.Context, req *proto.BatchGetRequest, _ ...grpc.CallOption) (*proto.BatchGetResponse, error) { + t.Fatalf("processDevices should not issue KV identity lookups (BatchGet): %v", req.GetKeys()) + return nil, nil }, } @@ -163,65 +159,6 @@ func TestProcessDevices_DoesNotHydrateCanonicalMetadata(t *testing.T) { require.Empty(t, data) } -func TestProcessDevices_SkipsCanonicalPrefetch(t *testing.T) { - fake := &fakeKVClient{} - var captured [][]string - fake.batchGetFn = func(ctx context.Context, req *proto.BatchGetRequest, _ ...grpc.CallOption) (*proto.BatchGetResponse, error) { - keys := append([]string(nil), req.GetKeys()...) 
- captured = append(captured, keys) - resp := &proto.BatchGetResponse{Results: make([]*proto.BatchGetEntry, len(keys))} - for i, key := range keys { - resp.Results[i] = &proto.BatchGetEntry{Key: key, Found: false} - } - return resp, nil - } - - integ := &NetboxIntegration{ - Config: &models.SourceConfig{AgentID: "agent", PollerID: "poller", Partition: "part"}, - KvClient: fake, - Logger: logger.NewTestLogger(), - } - - resp := DeviceResponse{Results: []Device{ - { - ID: 1, - Name: "host1", - Role: struct { - ID int "json:\"id\"" - Name string "json:\"name\"" - }{ID: 1, Name: "role"}, - Site: struct { - ID int "json:\"id\"" - Name string "json:\"name\"" - }{ID: 1, Name: "site"}, - PrimaryIP4: struct { - ID int "json:\"id\"" - Address string "json:\"address\"" - }{ID: 1, Address: "10.0.0.1/32"}, - }, - { - ID: 2, - Name: "host2", - Role: struct { - ID int "json:\"id\"" - Name string "json:\"name\"" - }{ID: 2, Name: "role"}, - Site: struct { - ID int "json:\"id\"" - Name string "json:\"name\"" - }{ID: 2, Name: "site"}, - PrimaryIP4: struct { - ID int "json:\"id\"" - Address string "json:\"address\"" - }{ID: 2, Address: "10.0.0.2/32"}, - }, - }} - - _, _, events := integ.processDevices(context.Background(), resp) - require.Len(t, events, 2) - require.Empty(t, captured, "sync should not issue KV BatchGet calls") -} - func TestParseTCPPorts(t *testing.T) { tests := []struct { name string diff --git a/rust/config-bootstrap/Cargo.toml b/rust/config-bootstrap/Cargo.toml index cf105148e..6015c72b6 100644 --- a/rust/config-bootstrap/Cargo.toml +++ b/rust/config-bootstrap/Cargo.toml @@ -14,7 +14,7 @@ tokio = { workspace = true, features = ["rt-multi-thread", "macros", "fs", "time serde = { version = "1", features = ["derive"] } serde_json = "1" toml = "0.8" -thiserror = "1" +thiserror = "2" tracing = "0.1" kvutil = { path = "../kvutil" } diff --git a/rust/edge-onboarding/Cargo.toml b/rust/edge-onboarding/Cargo.toml index 8ac029cee..bc324e26c 100644 --- a/rust/edge-onboarding/Cargo.toml +++ b/rust/edge-onboarding/Cargo.toml @@ -13,7 +13,7 @@ path = "src/lib.rs" serde = { version = "1", features = ["derive"] } serde_json = "1" base64 = "0.22" -thiserror = "1" +thiserror = "2" tracing = "0.1" ureq = { version = "2", features = ["json"] } urlencoding = "2" diff --git a/rust/kvutil/Cargo.toml b/rust/kvutil/Cargo.toml index 360cc3ac0..5464ce172 100644 --- a/rust/kvutil/Cargo.toml +++ b/rust/kvutil/Cargo.toml @@ -13,7 +13,7 @@ tonic = { workspace = true } prost = { workspace = true } serde = { version = "1", features = ["derive"] } serde_json = "1" -thiserror = "1" +thiserror = "2" tokio-stream = { workspace = true } toml = "0.8" spiffe = { version = "0.6", default-features = false, features = ["spiffe-types", "workload-api"] } diff --git a/rust/srql/Cargo.toml b/rust/srql/Cargo.toml index fbcc62cec..596defc7d 100644 --- a/rust/srql/Cargo.toml +++ b/rust/srql/Cargo.toml @@ -20,7 +20,7 @@ pq-sys = { version = "0.7.5", features = ["bundled"] } serde = { version = "1.0.214", features = ["derive"] } serde_json = "1.0.132" serde_with = "3.11.0" -thiserror = "1.0.69" +thiserror = "2.0.17" tokio = { version = "1.41.0", features = ["full"] } tower = "0.5.1" tower-http = { version = "0.6.1", features = ["trace"] } diff --git a/rust/srql/src/config.rs b/rust/srql/src/config.rs index afca19d3d..a7750c738 100644 --- a/rust/srql/src/config.rs +++ b/rust/srql/src/config.rs @@ -12,6 +12,8 @@ pub struct AppConfig { pub database_url: String, pub max_pool_size: u32, pub pg_ssl_root_cert: Option<String>, + pub pg_ssl_cert: 
Option<String>, + pub pg_ssl_key: Option<String>, pub api_key: Option<String>, pub api_key_kv_key: Option<String>, pub allowed_origins: Option<Vec<String>>, @@ -119,6 +121,8 @@ impl AppConfig { database_url, max_pool_size: raw.srql_max_pool_size, pg_ssl_root_cert: env::var("PGSSLROOTCERT").ok(), + pg_ssl_cert: env::var("PGSSLCERT").ok(), + pg_ssl_key: env::var("PGSSLKEY").ok(), api_key: raw.srql_api_key, api_key_kv_key: raw.srql_api_key_kv_key, allowed_origins, @@ -129,6 +133,25 @@ impl AppConfig { rate_limit_window: Duration::from_secs(raw.srql_rate_limit_window_secs.max(1)), }) } + + pub fn embedded(database_url: String) -> Self { + Self { + listen_addr: "127.0.0.1:0".parse().expect("valid socket addr"), + database_url, + max_pool_size: default_pool_size(), + pg_ssl_root_cert: None, + pg_ssl_cert: None, + pg_ssl_key: None, + api_key: None, + api_key_kv_key: None, + allowed_origins: None, + default_limit: default_limit(), + max_limit: default_max_limit(), + request_timeout: Duration::from_secs(default_timeout_secs()), + rate_limit_max_requests: default_rate_limit_requests(), + rate_limit_window: Duration::from_secs(default_rate_limit_window_secs()), + } + } } fn resolve_addr( diff --git a/rust/srql/src/db.rs b/rust/srql/src/db.rs index 079ffb0d7..2b75adac0 100644 --- a/rust/srql/src/db.rs +++ b/rust/srql/src/db.rs @@ -3,6 +3,7 @@ use anyhow::{Context, Result}; use async_trait::async_trait; use bb8::{ManageConnection, Pool}; use diesel_async::{AsyncPgConnection, SimpleAsyncConnection}; +use rustls::pki_types::{CertificateDer, PrivateKeyDer}; use rustls::{ClientConfig, RootCertStore}; use rustls_pemfile::certs; use std::fs::File; @@ -14,8 +15,12 @@ use tracing::{error, info}; pub type PgPool = Pool<PgConnectionManager>; pub async fn connect_pool(config: &AppConfig) -> Result<PgPool> { - let manager = - PgConnectionManager::new(&config.database_url, config.pg_ssl_root_cert.as_deref())?; + let manager = PgConnectionManager::new( + &config.database_url, + config.pg_ssl_root_cert.as_deref(), + config.pg_ssl_cert.as_deref(), + config.pg_ssl_key.as_deref(), + )?; let pool = Pool::builder() .max_size(config.max_pool_size) .build(manager) @@ -44,12 +49,17 @@ enum PgTls { } impl PgConnectionManager { - fn new(database_url: &str, root_cert: Option<&str>) -> Result<Self> { + fn new( + database_url: &str, + root_cert: Option<&str>, + client_cert: Option<&str>, + client_key: Option<&str>, + ) -> Result<Self> { let config = database_url .parse::<PgConfig>() .context("invalid DATABASE_URL")?; let tls = if let Some(path) = root_cert { - PgTls::Rustls(build_tls_connector(path)?) + PgTls::Rustls(build_tls_connector(path, client_cert, client_key)?) 
} else { PgTls::None }; @@ -90,8 +100,12 @@ impl ManageConnection for PgConnectionManager { } } -fn build_tls_connector(path: &str) -> Result<MakeRustlsConnect> { - let mut reader = BufReader::new(File::open(path).context("failed to open PGSSLROOTCERT")?); +fn build_tls_connector( + root_cert: &str, + client_cert: Option<&str>, + client_key: Option<&str>, +) -> Result<MakeRustlsConnect> { + let mut reader = BufReader::new(File::open(root_cert).context("failed to open PGSSLROOTCERT")?); let mut root_store = RootCertStore::empty(); for cert in certs(&mut reader) { let cert = cert.context("failed to parse PGSSLROOTCERT")?; @@ -99,8 +113,61 @@ fn build_tls_connector(path: &str) -> Result<MakeRustlsConnect> { .add(cert) .map_err(|_| anyhow::anyhow!("invalid certificate in PGSSLROOTCERT"))?; } - let config = ClientConfig::builder() - .with_root_certificates(root_store) - .with_no_client_auth(); - Ok(MakeRustlsConnect::new(config)) + + Ok(MakeRustlsConnect::new(build_client_config( + root_store, + root_cert, + client_cert, + client_key, + )?)) +} + +fn build_client_config( + root_store: RootCertStore, + root_cert: &str, + client_cert: Option<&str>, + client_key: Option<&str>, +) -> Result<ClientConfig> { + let builder = ClientConfig::builder().with_root_certificates(root_store); + + match (client_cert, client_key) { + (None, None) => Ok(builder.with_no_client_auth()), + (Some(cert), Some(key)) => { + let certs = load_client_certs(cert)?; + let key = load_client_key(key)?; + builder + .with_client_auth_cert(certs, key) + .with_context(|| format!("failed to build client TLS config for {root_cert}")) + } + _ => anyhow::bail!("PGSSLCERT and PGSSLKEY must both be set (or neither)"), + } +} + +fn load_client_certs(path: &str) -> Result<Vec<CertificateDer<'static>>> { + let mut reader = BufReader::new( + File::open(path).with_context(|| format!("failed to open PGSSLCERT file '{path}'"))?, + ); + + let mut chain = Vec::new(); + for cert in certs(&mut reader) { + chain.push(cert.context("failed to parse PGSSLCERT")?); + } + + if chain.is_empty() { + anyhow::bail!("PGSSLCERT contained no certificates"); + } + + Ok(chain) +} + +fn load_client_key(path: &str) -> Result<PrivateKeyDer<'static>> { + let mut reader = BufReader::new( + File::open(path).with_context(|| format!("failed to open PGSSLKEY file '{path}'"))?, + ); + + let key = rustls_pemfile::private_key(&mut reader) + .context("failed to parse PGSSLKEY")? + .context("PGSSLKEY contained no private keys")?; + + Ok(key) } diff --git a/rust/srql/src/lib.rs b/rust/srql/src/lib.rs index edb065ed2..b66692352 100644 --- a/rust/srql/src/lib.rs +++ b/rust/srql/src/lib.rs @@ -15,8 +15,27 @@ pub mod time; use crate::{config::AppConfig, server::Server}; +pub use crate::query::{ + QueryDirection, QueryEngine, QueryRequest, QueryResponse, TranslateRequest, TranslateResponse, +}; + /// Bootstraps the SRQL service using environment configuration. 
pub async fn run() -> anyhow::Result<()> { let config = AppConfig::from_env()?; Server::new(config).await?.run().await } + +#[derive(Clone)] +pub struct EmbeddedSrql { + pub query: QueryEngine, +} + +impl EmbeddedSrql { + pub async fn new(config: AppConfig) -> anyhow::Result<Self> { + let pool = db::connect_pool(&config).await?; + let config = std::sync::Arc::new(config); + Ok(Self { + query: QueryEngine::new(pool, config), + }) + } +} diff --git a/rust/srql/src/models.rs b/rust/srql/src/models.rs index de6c76113..3ec50f843 100644 --- a/rust/srql/src/models.rs +++ b/rust/srql/src/models.rs @@ -361,6 +361,7 @@ pub struct OtelMetricRow { pub is_slow: Option<bool>, pub component: Option<String>, pub level: Option<String>, + pub unit: Option<String>, pub created_at: DateTime<Utc>, } @@ -385,6 +386,7 @@ impl OtelMetricRow { "is_slow": self.is_slow, "component": self.component, "level": self.level, + "unit": self.unit, }) } } diff --git a/rust/srql/src/parser.rs b/rust/srql/src/parser.rs index 328a4e6fd..86b312aff 100644 --- a/rust/srql/src/parser.rs +++ b/rust/srql/src/parser.rs @@ -11,6 +11,7 @@ pub enum Entity { DeviceUpdates, Interfaces, DeviceGraph, + GraphCypher, Events, Logs, Services, @@ -35,10 +36,28 @@ pub struct QueryAst { pub limit: Option<i64>, pub time_filter: Option<TimeFilterSpec>, pub stats: Option<String>, + pub downsample: Option<DownsampleSpec>, } const MAX_STATS_EXPR_LEN: usize = 1024; const MAX_FILTER_LIST_VALUES: usize = 200; +const MAX_DOWNSAMPLE_BUCKET_SECS: i64 = 31 * 24 * 60 * 60; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum DownsampleAgg { + Avg, + Min, + Max, + Sum, + Count, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DownsampleSpec { + pub bucket_seconds: i64, + pub agg: DownsampleAgg, + pub series: Option<String>, +} #[derive(Debug, Clone)] pub struct Filter { @@ -106,8 +125,12 @@ pub fn parse(input: &str) -> Result<QueryAst> { let mut limit = None; let mut time_filter = None; let mut stats = None; + let mut downsample_bucket_seconds: Option<i64> = None; + let mut downsample_agg = DownsampleAgg::Avg; + let mut downsample_series: Option<String> = None; - for token in tokenize(input) { + let mut tokens = tokenize(input).into_iter().peekable(); + while let Some(token) = tokens.next() { let (raw_key, raw_value) = split_token(&token)?; let key = raw_key.trim().to_lowercase(); let value = parse_value(raw_value); @@ -134,8 +157,49 @@ pub fn parse(input: &str) -> Result<QueryAst> { "time" | "timeframe" => { time_filter = Some(parse_time_value(value.as_scalar()?)?); } + "bucket" | "downsample" => { + downsample_bucket_seconds = Some(parse_bucket_seconds(value.as_scalar()?)?); + } + "agg" => { + downsample_agg = parse_downsample_agg(value.as_scalar()?)?; + } + "series" => { + downsample_series = normalize_optional_string(value.as_scalar()?); + } "stats" => { - let expr = value.as_scalar()?.to_string(); + let mut expr = value.as_scalar()?.to_string(); + + if tokens + .peek() + .is_some_and(|next| next.as_str().eq_ignore_ascii_case("as")) + { + let _ = tokens.next(); + let alias_token = tokens.next().ok_or_else(|| { + ServiceError::InvalidRequest( + "stats aliases must be of the form 'stats:expr as alias'".into(), + ) + })?; + if alias_token.contains(':') { + return Err(ServiceError::InvalidRequest( + "stats aliases must be of the form 'stats:expr as alias'".into(), + )); + } + + let alias = alias_token + .trim() + .trim_matches('"') + .trim_matches('\'') + .to_string(); + if alias.is_empty() { + return Err(ServiceError::InvalidRequest( + "stats 
aliases must be of the form 'stats:expr as alias'".into(), + )); + } + + expr.push_str(" as "); + expr.push_str(&alias); + } + if expr.trim().len() > MAX_STATS_EXPR_LEN { return Err(ServiceError::InvalidRequest(format!( "stats expression must be <= {MAX_STATS_EXPR_LEN} characters" @@ -164,6 +228,12 @@ pub fn parse(input: &str) -> Result<QueryAst> { ServiceError::InvalidRequest("queries must include an in:<entity> token".into()) })?; + let downsample = downsample_bucket_seconds.map(|bucket_seconds| DownsampleSpec { + bucket_seconds, + agg: downsample_agg, + series: downsample_series, + }); + Ok(QueryAst { entity, filters, @@ -171,6 +241,7 @@ pub fn parse(input: &str) -> Result<QueryAst> { limit, time_filter, stats, + downsample, }) } @@ -179,6 +250,7 @@ fn parse_entity(raw: &str) -> Result<Entity> { match normalized.as_str() { "devices" | "device" | "device_inventory" => Ok(Entity::Devices), "device_graph" | "devicegraph" | "graph" => Ok(Entity::DeviceGraph), + "graph_cypher" | "graphcypher" | "cypher" => Ok(Entity::GraphCypher), "device_updates" | "device_update" | "updates" => Ok(Entity::DeviceUpdates), "interfaces" | "interface" | "discovered_interfaces" => Ok(Entity::Interfaces), "events" | "activity" => Ok(Entity::Events), @@ -203,6 +275,70 @@ fn parse_entity(raw: &str) -> Result<Entity> { } } +fn parse_bucket_seconds(raw: &str) -> Result<i64> { + let raw = raw.trim(); + if raw.is_empty() { + return Err(ServiceError::InvalidRequest( + "bucket requires a duration like 5m, 1h".into(), + )); + } + + let raw = raw.to_lowercase(); + let (number_part, unit_part) = raw.split_at(raw.len().saturating_sub(1)); + let value = number_part + .parse::<i64>() + .map_err(|_| ServiceError::InvalidRequest("bucket duration must be an integer".into()))?; + + if value <= 0 { + return Err(ServiceError::InvalidRequest( + "bucket duration must be positive".into(), + )); + } + + let multiplier = match unit_part { + "s" => 1, + "m" => 60, + "h" => 60 * 60, + "d" => 24 * 60 * 60, + _ => { + return Err(ServiceError::InvalidRequest( + "bucket supports only s|m|h|d suffixes".into(), + )) + } + }; + + let seconds = value.saturating_mul(multiplier); + if seconds <= 0 || seconds > MAX_DOWNSAMPLE_BUCKET_SECS { + return Err(ServiceError::InvalidRequest(format!( + "bucket duration must be between 1s and {}d", + MAX_DOWNSAMPLE_BUCKET_SECS / (24 * 60 * 60) + ))); + } + Ok(seconds) +} + +fn parse_downsample_agg(raw: &str) -> Result<DownsampleAgg> { + match raw.trim().to_lowercase().as_str() { + "avg" | "mean" => Ok(DownsampleAgg::Avg), + "min" => Ok(DownsampleAgg::Min), + "max" => Ok(DownsampleAgg::Max), + "sum" => Ok(DownsampleAgg::Sum), + "count" => Ok(DownsampleAgg::Count), + other => Err(ServiceError::InvalidRequest(format!( + "unsupported agg '{other}' (use avg|min|max|sum|count)" + ))), + } +} + +fn normalize_optional_string(raw: &str) -> Option<String> { + let value = raw.trim().trim_matches('"').trim_matches('\'').trim(); + if value.is_empty() { + None + } else { + Some(value.to_string()) + } +} + fn build_filter(key: &str, value: FilterValue) -> Filter { let mut field = key.trim(); let mut negated = false; @@ -463,6 +599,25 @@ mod tests { assert_eq!(ast.stats.as_deref(), Some("count() as total")); } + #[test] + fn parses_unquoted_stats_alias() { + let ast = parse("in:devices stats:count() as total").unwrap(); + assert_eq!(ast.stats.as_deref(), Some("count() as total")); + } + + #[test] + fn parses_unquoted_stats_alias_with_following_tokens() { + let ast = parse("in:devices stats:count() as total 
time:last_7d").unwrap(); + assert_eq!(ast.stats.as_deref(), Some("count() as total")); + assert!(ast.time_filter.is_some()); + } + + #[test] + fn rejects_stats_alias_missing_identifier() { + let err = parse("in:devices stats:count() as").unwrap_err(); + assert!(matches!(err, ServiceError::InvalidRequest(_))); + } + #[test] fn parses_interfaces_entity() { let ast = parse("in:interfaces time:last_24h").unwrap(); diff --git a/rust/srql/src/query/cpu_metrics.rs b/rust/srql/src/query/cpu_metrics.rs index bd95c9063..11ff5efa2 100644 --- a/rust/srql/src/query/cpu_metrics.rs +++ b/rust/srql/src/query/cpu_metrics.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::CpuMetricRow, @@ -88,16 +88,42 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(CpuMetricRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; if let Some(spec) = parse_stats_spec(plan.stats.as_deref())? { let sql = build_stats_query(plan, &spec)?; - return Ok(sql.sql); + let params = sql.binds.into_iter().map(bind_param_from_stats).collect(); + return Ok((rewrite_placeholders(&sql.sql), params)); } - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -129,6 +155,58 @@ fn base_query(plan: &QueryPlan) -> Result<CpuQuery<'static>> { Ok(query) } +fn bind_param_from_stats(value: SqlBindValue) -> BindParam { + match value { + SqlBindValue::Text(value) => BindParam::Text(value), + SqlBindValue::TextArray(values) => BindParam::TextArray(values), + SqlBindValue::Int(value) => BindParam::Int(i64::from(value)), + SqlBindValue::Float(value) => BindParam::Float(value), + SqlBindValue::Timestamp(value) => BindParam::timestamptz(value), + } +} + +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | 
"partition" | "cluster" | "label" => { + collect_text_params(params, filter) + } + "core_id" => { + params.push(BindParam::Int(i64::from(parse_i32( + filter.value.as_scalar()?, + )?))); + Ok(()) + } + "usage_percent" | "frequency_hz" => { + params.push(BindParam::Float(parse_f64(filter.value.as_scalar()?)?)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for cpu_metrics: '{other}'" + ))), + } +} + fn apply_filter<'a>(mut query: CpuQuery<'a>, filter: &Filter) -> Result<CpuQuery<'a>> { match filter.field.as_str() { "poller_id" => { @@ -661,6 +739,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: Some(stats.to_string()), + downsample: None, } } @@ -680,6 +759,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan); diff --git a/rust/srql/src/query/device_graph.rs b/rust/srql/src/query/device_graph.rs index fa10ba296..e1b7b671c 100644 --- a/rust/srql/src/query/device_graph.rs +++ b/rust/srql/src/query/device_graph.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, parser::{Entity, FilterOp}, @@ -24,9 +24,17 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(row.map(|r| vec![r.result]).unwrap_or_default()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - Ok(DEVICE_GRAPH_QUERY.trim().to_string()) + let params = extract_params(plan)?; + Ok(( + DEVICE_GRAPH_QUERY.trim().to_string(), + vec![ + BindParam::Text(params.device_id), + BindParam::Bool(params.collector_owned_only), + BindParam::Bool(params.include_topology), + ], + )) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { diff --git a/rust/srql/src/query/device_updates.rs b/rust/srql/src/query/device_updates.rs index eabec3183..6b78f102f 100644 --- a/rust/srql/src/query/device_updates.rs +++ b/rust/srql/src/query/device_updates.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::DeviceUpdateRow, @@ -38,10 +38,35 @@ pub(super) async fn execute( Ok(rows.into_iter().map(DeviceUpdateRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -127,6 +152,55 @@ fn apply_filter<'a>( Ok(query) } +fn 
collect_text_params( + params: &mut Vec<BindParam>, + filter: &Filter, + allow_lists: bool, +) -> Result<()> { + match filter.op { + crate::parser::FilterOp::Eq + | crate::parser::FilterOp::NotEq + | crate::parser::FilterOp::Like + | crate::parser::FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + crate::parser::FilterOp::In | crate::parser::FilterOp::NotIn if allow_lists => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + crate::parser::FilterOp::In | crate::parser::FilterOp::NotIn => Err( + ServiceError::InvalidRequest("list filters are not supported for this field".into()), + ), + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "device_id" | "poller_id" | "agent_id" | "partition" | "discovery_source" | "source" => { + collect_text_params(params, filter, true) + } + "ip" | "mac" | "hostname" => collect_text_params(params, filter, false), + "available" | "is_available" => { + let value = filter.value.as_scalar()?.to_lowercase(); + let bool_val = value == "true" || value == "1"; + params.push(BindParam::Bool(bool_val)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for device_updates: '{other}'" + ))), + } +} + fn apply_ordering<'a>( mut query: DeviceUpdatesQuery<'a>, order: &[OrderClause], diff --git a/rust/srql/src/query/devices.rs b/rust/srql/src/query/devices.rs index 8b72f3140..eba2a39d8 100644 --- a/rust/srql/src/query/devices.rs +++ b/rust/srql/src/query/devices.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::DeviceRow, @@ -16,7 +16,7 @@ use diesel::dsl::{not, sql}; use diesel::pg::Pg; use diesel::prelude::*; use diesel::query_builder::{AsQuery, BoxedSelectStatement, FromClause}; -use diesel::sql_types::{Array, Bool, Text}; +use diesel::sql_types::{Array, BigInt, Bool, Text}; use diesel::PgTextExpressionMethods; use diesel_async::{AsyncPgConnection, RunQueryDsl}; @@ -24,12 +24,24 @@ type UnifiedDevicesTable = crate::schema::unified_devices::table; type DeviceFromClause = FromClause<UnifiedDevicesTable>; type DeviceQuery<'a> = BoxedSelectStatement<'a, <UnifiedDevicesTable as AsQuery>::SqlType, DeviceFromClause, Pg>; +type DeviceStatsQuery<'a> = BoxedSelectStatement<'a, BigInt, DeviceFromClause, Pg>; pub(super) async fn execute( conn: &mut AsyncPgConnection, plan: &QueryPlan, ) -> Result<Vec<serde_json::Value>> { ensure_entity(plan)?; + + if let Some(spec) = parse_stats_spec(plan.stats.as_deref())? 
{ + let query = build_stats_query(plan, &spec)?; + let values: Vec<i64> = query + .load(conn) + .await + .map_err(|err| ServiceError::Internal(err.into()))?; + let count = values.into_iter().next().unwrap_or(0); + return Ok(vec![serde_json::json!({ spec.alias: count })]); + } + let query = build_query(plan)?; let rows: Vec<DeviceRow> = query .limit(plan.limit) @@ -41,12 +53,65 @@ pub(super) async fn execute( Ok(rows.into_iter().map(DeviceRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - let sql = - diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string(); - Ok(sql) + if let Some(spec) = parse_stats_spec(plan.stats.as_deref())? { + let query = build_stats_query(plan, &spec)?; + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + return Ok((sql, params)); + } + + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -73,6 +138,72 @@ fn build_query(plan: &QueryPlan) -> Result<DeviceQuery<'static>> { Ok(query) } +#[derive(Debug, Clone)] +struct DeviceStatsSpec { + alias: String, +} + +fn parse_stats_spec(raw: Option<&str>) -> Result<Option<DeviceStatsSpec>> { + let raw = match raw { + Some(raw) if !raw.trim().is_empty() => raw.trim(), + _ => return Ok(None), + }; + + let tokens: Vec<&str> = raw.split_whitespace().collect(); + if tokens.len() < 3 { + return Err(ServiceError::InvalidRequest( + "stats expressions must be of the form 'count() as alias'".into(), + )); + } + + if !tokens[0].eq_ignore_ascii_case("count()") || !tokens[1].eq_ignore_ascii_case("as") { + return Err(ServiceError::InvalidRequest( + "devices stats only support count()".into(), + )); + } + + let alias = tokens[2] + .trim_matches('"') + .trim_matches('\'') + .to_lowercase(); + + if alias.is_empty() + || alias + .chars() + .any(|ch| !ch.is_ascii_alphanumeric() && ch != '_') + { + return Err(ServiceError::InvalidRequest( + "stats alias must be alphanumeric".into(), + )); + } + + if tokens.len() > 3 { + return Err(ServiceError::InvalidRequest( + "devices stats do not support grouping yet".into(), + )); + } + + 
Ok(Some(DeviceStatsSpec { alias })) +} + +fn build_stats_query( + plan: &QueryPlan, + spec: &DeviceStatsSpec, +) -> Result<DeviceStatsQuery<'static>> { + let mut query = unified_devices.into_boxed::<Pg>(); + + if let Some(TimeRange { start, end }) = &plan.time_range { + query = query.filter(col_last_seen.ge(*start).and(col_last_seen.le(*end))); + } + + for filter in &plan.filters { + query = apply_filter(query, filter)?; + } + + let select_sql = format!("coalesce(COUNT(*), 0) as {}", spec.alias); + Ok(query.select(sql::<BigInt>(&select_sql))) +} + fn apply_filter<'a>(mut query: DeviceQuery<'a>, filter: &Filter) -> Result<DeviceQuery<'a>> { match filter.field.as_str() { "device_id" => { @@ -182,6 +313,63 @@ fn apply_filter<'a>(mut query: DeviceQuery<'a>, filter: &Filter) -> Result<Devic Ok(query) } +fn collect_text_params( + params: &mut Vec<BindParam>, + filter: &Filter, + allow_lists: bool, +) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn if allow_lists => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => Err(ServiceError::InvalidRequest( + "list filters are not supported for this field".into(), + )), + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "device_id" => collect_text_params(params, filter, true), + "hostname" | "ip" | "mac" => collect_text_params(params, filter, false), + "poller_id" | "agent_id" | "device_type" | "service_type" | "service_status" => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + "is_available" => { + params.push(BindParam::Bool(parse_bool(filter.value.as_scalar()?)?)); + Ok(()) + } + "discovery_sources" => { + let values = match &filter.value { + FilterValue::Scalar(v) => vec![v.to_string()], + FilterValue::List(list) => list.clone(), + }; + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field '{other}'" + ))), + } +} + fn apply_ordering<'a>(mut query: DeviceQuery<'a>, order: &[OrderClause]) -> DeviceQuery<'a> { let mut applied = false; for clause in order { diff --git a/rust/srql/src/query/disk_metrics.rs b/rust/srql/src/query/disk_metrics.rs index 7423a5aa0..d0b4622b5 100644 --- a/rust/srql/src/query/disk_metrics.rs +++ b/rust/srql/src/query/disk_metrics.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::DiskMetricRow, @@ -37,10 +37,35 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(DiskMetricRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut 
params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -140,6 +165,48 @@ fn apply_filter<'a>(mut query: DiskQuery<'a>, filter: &Filter) -> Result<DiskQue Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + crate::parser::FilterOp::Eq + | crate::parser::FilterOp::NotEq + | crate::parser::FilterOp::Like + | crate::parser::FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + crate::parser::FilterOp::In | crate::parser::FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | "partition" | "mount_point" + | "device_name" => collect_text_params(params, filter), + "usage_percent" => { + params.push(BindParam::Float(parse_f64(filter.value.as_scalar()?)?)); + Ok(()) + } + "total_bytes" | "used_bytes" | "available_bytes" => { + params.push(BindParam::Int(parse_i64(filter.value.as_scalar()?)?)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for disk_metrics: '{other}'" + ))), + } +} + fn apply_ordering<'a>(mut query: DiskQuery<'a>, order: &[OrderClause]) -> DiskQuery<'a> { let mut applied = false; for clause in order { @@ -252,6 +319,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan); diff --git a/rust/srql/src/query/downsample.rs b/rust/srql/src/query/downsample.rs new file mode 100644 index 000000000..4d6b087f1 --- /dev/null +++ b/rust/srql/src/query/downsample.rs @@ -0,0 +1,521 @@ +use super::{BindParam, QueryPlan}; +use crate::{ + error::{Result, ServiceError}, + parser::{DownsampleAgg, Entity, Filter, FilterOp}, + time::TimeRange, +}; +use chrono::{DateTime, Utc}; +use diesel::deserialize::QueryableByName; +use diesel::pg::Pg; +use diesel::sql_query; +use diesel::sql_types::{Float8, Nullable, Text, Timestamptz}; +use diesel_async::{AsyncPgConnection, RunQueryDsl}; +use serde_json::Value; + +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { + let sql = build_sql(plan)?; + let params = build_params(plan)?; + Ok((rewrite_placeholders(&sql), params)) +} + +pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> Result<Vec<Value>> { + let sql = build_sql(plan)?; + let mut query = sql_query(rewrite_placeholders(&sql)).into_boxed::<Pg>(); + + for bind in build_bind_values(plan)? 
{ + query = bind.apply(query); + } + + let rows: Vec<DownsampleRow> = query + .load(conn) + .await + .map_err(|err| ServiceError::Internal(err.into()))?; + + Ok(rows + .into_iter() + .map(|row| { + serde_json::json!({ + "timestamp": row.timestamp.to_rfc3339(), + "series": row.series, + "value": row.value, + }) + }) + .collect()) +} + +fn build_sql(plan: &QueryPlan) -> Result<String> { + let downsample = plan.downsample.as_ref().ok_or_else(|| { + ServiceError::InvalidRequest("downsample requires bucket:<duration>".into()) + })?; + + let (table, ts_col, value_col, forced_metric_type) = match plan.entity { + Entity::TimeseriesMetrics => ("timeseries_metrics", "timestamp", "value", None), + Entity::SnmpMetrics => ("timeseries_metrics", "timestamp", "value", Some("snmp")), + Entity::RperfMetrics => ("timeseries_metrics", "timestamp", "value", Some("rperf")), + Entity::CpuMetrics => ("cpu_metrics", "timestamp", "usage_percent", None), + Entity::MemoryMetrics => ("memory_metrics", "timestamp", "usage_percent", None), + Entity::DiskMetrics => ("disk_metrics", "timestamp", "usage_percent", None), + Entity::ProcessMetrics => ("process_metrics", "timestamp", "cpu_usage", None), + _ => { + return Err(ServiceError::InvalidRequest( + "downsample is only supported for metric entities".into(), + )) + } + }; + + let time_range = plan.time_range.as_ref().ok_or_else(|| { + ServiceError::InvalidRequest("downsample queries require time:<range>".into()) + })?; + + let series_expr = series_expr(plan, table)?; + let agg_expr = agg_expr(downsample.agg, value_col); + let bucket_secs = downsample.bucket_seconds; + + let mut clauses = Vec::new(); + clauses.push(format!("{ts_col} >= ?")); + clauses.push(format!("{ts_col} <= ?")); + + if let Some(metric_type) = forced_metric_type { + clauses.push("metric_type = ?".to_string()); + let _ = metric_type; + } + + for filter in &plan.filters { + let (clause, _) = filter_clause(&plan.entity, table, filter)?; + clauses.push(clause); + } + + let mut sql = format!( + "SELECT time_bucket(make_interval(secs => {bucket_secs}), {ts_col}) AS timestamp, {series_expr} AS series, {agg_expr} AS value\nFROM {table}\nWHERE ", + ); + sql.push_str(&clauses.join(" AND ")); + sql.push_str("\nGROUP BY 1, 2\nORDER BY 1 ASC\nLIMIT ? OFFSET ?"); + + let _ = time_range; + Ok(sql) +} + +fn build_params(plan: &QueryPlan) -> Result<Vec<BindParam>> { + Ok(build_bind_values(plan)? 
+ .into_iter() + .map(|value| value.into_bind_param()) + .collect()) +} + +fn build_bind_values(plan: &QueryPlan) -> Result<Vec<SqlBindValue>> { + let mut binds = Vec::new(); + + let TimeRange { start, end } = plan.time_range.as_ref().ok_or_else(|| { + ServiceError::InvalidRequest("downsample queries require time:<range>".into()) + })?; + + binds.push(SqlBindValue::Timestamp(*start)); + binds.push(SqlBindValue::Timestamp(*end)); + + if matches!(plan.entity, Entity::SnmpMetrics) { + binds.push(SqlBindValue::Text("snmp".to_string())); + } else if matches!(plan.entity, Entity::RperfMetrics) { + binds.push(SqlBindValue::Text("rperf".to_string())); + } + + for filter in &plan.filters { + let (_, mut values) = filter_clause(&plan.entity, "unused", filter)?; + binds.append(&mut values); + } + + binds.push(SqlBindValue::BigInt(plan.limit)); + binds.push(SqlBindValue::BigInt(plan.offset)); + + Ok(binds) +} + +fn series_expr(plan: &QueryPlan, table: &str) -> Result<String> { + let downsample = plan.downsample.as_ref().ok_or_else(|| { + ServiceError::InvalidRequest("downsample requires bucket:<duration>".into()) + })?; + + let Some(series) = downsample.series.as_deref() else { + return Ok("NULL::text".to_string()); + }; + + let series = series.trim().to_lowercase(); + + let expr = match plan.entity { + Entity::TimeseriesMetrics | Entity::SnmpMetrics | Entity::RperfMetrics => { + match series.as_str() { + "metric_name" => "metric_name".to_string(), + "metric_type" => "metric_type".to_string(), + "device_id" => "device_id".to_string(), + "poller_id" => "poller_id".to_string(), + "agent_id" => "agent_id".to_string(), + "partition" => "partition".to_string(), + "target_device_ip" => "target_device_ip".to_string(), + "if_index" => "if_index::text".to_string(), + other => { + return Err(ServiceError::InvalidRequest(format!( + "unsupported series field '{other}' for {table}" + ))) + } + } + } + Entity::CpuMetrics => match series.as_str() { + "device_id" => "device_id".to_string(), + "host_id" => "host_id".to_string(), + "poller_id" => "poller_id".to_string(), + "agent_id" => "agent_id".to_string(), + "core_id" => "core_id::text".to_string(), + "label" => "label".to_string(), + "cluster" => "cluster".to_string(), + "partition" => "partition".to_string(), + other => { + return Err(ServiceError::InvalidRequest(format!( + "unsupported series field '{other}' for {table}" + ))) + } + }, + Entity::MemoryMetrics => match series.as_str() { + "device_id" => "device_id".to_string(), + "host_id" => "host_id".to_string(), + "poller_id" => "poller_id".to_string(), + "agent_id" => "agent_id".to_string(), + "partition" => "partition".to_string(), + other => { + return Err(ServiceError::InvalidRequest(format!( + "unsupported series field '{other}' for {table}" + ))) + } + }, + Entity::DiskMetrics => match series.as_str() { + "device_id" => "device_id".to_string(), + "host_id" => "host_id".to_string(), + "poller_id" => "poller_id".to_string(), + "agent_id" => "agent_id".to_string(), + "partition" => "partition".to_string(), + "mount_point" => "mount_point".to_string(), + "device_name" => "device_name".to_string(), + other => { + return Err(ServiceError::InvalidRequest(format!( + "unsupported series field '{other}' for {table}" + ))) + } + }, + Entity::ProcessMetrics => match series.as_str() { + "device_id" => "device_id".to_string(), + "host_id" => "host_id".to_string(), + "poller_id" => "poller_id".to_string(), + "agent_id" => "agent_id".to_string(), + "partition" => "partition".to_string(), + "name" => "name".to_string(), 
+ "pid" => "pid::text".to_string(), + "status" => "status".to_string(), + other => { + return Err(ServiceError::InvalidRequest(format!( + "unsupported series field '{other}' for {table}" + ))) + } + }, + _ => { + return Err(ServiceError::InvalidRequest( + "downsample is only supported for metric entities".into(), + )) + } + }; + + Ok(format!("coalesce({expr}, '')")) +} + +fn agg_expr(agg: DownsampleAgg, value_col: &str) -> String { + match agg { + DownsampleAgg::Avg => format!("AVG({value_col})"), + DownsampleAgg::Min => format!("MIN({value_col})"), + DownsampleAgg::Max => format!("MAX({value_col})"), + DownsampleAgg::Sum => format!("SUM({value_col})"), + DownsampleAgg::Count => "COUNT(*)::double precision".to_string(), + } +} + +fn filter_clause( + entity: &Entity, + _table: &str, + filter: &Filter, +) -> Result<(String, Vec<SqlBindValue>)> { + match entity { + Entity::TimeseriesMetrics | Entity::SnmpMetrics | Entity::RperfMetrics => { + timeseries_filter_clause(filter) + } + Entity::CpuMetrics => cpu_filter_clause(filter), + Entity::MemoryMetrics => memory_filter_clause(filter), + Entity::DiskMetrics => disk_filter_clause(filter), + Entity::ProcessMetrics => process_filter_clause(filter), + _ => Err(ServiceError::InvalidRequest( + "downsample is only supported for metric entities".into(), + )), + } +} + +fn text_clause(column: &str, filter: &Filter) -> Result<(String, Vec<SqlBindValue>)> { + let mut binds = Vec::new(); + let clause = match filter.op { + FilterOp::Eq => { + binds.push(SqlBindValue::Text(filter.value.as_scalar()?.to_string())); + format!("{column} = ?") + } + FilterOp::NotEq => { + binds.push(SqlBindValue::Text(filter.value.as_scalar()?.to_string())); + format!("{column} <> ?") + } + FilterOp::Like => { + binds.push(SqlBindValue::Text(filter.value.as_scalar()?.to_string())); + format!("{column} ILIKE ?") + } + FilterOp::NotLike => { + binds.push(SqlBindValue::Text(filter.value.as_scalar()?.to_string())); + format!("NOT ({column} ILIKE ?)") + } + FilterOp::In => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(("1=0".to_string(), Vec::new())); + } + binds.push(SqlBindValue::TextArray(values)); + format!("{column} = ANY(?)") + } + FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(("1=1".to_string(), Vec::new())); + } + binds.push(SqlBindValue::TextArray(values)); + format!("{column} <> ALL(?)") + } + _ => { + return Err(ServiceError::InvalidRequest(format!( + "unsupported operator for {column}: {:?}", + filter.op + ))) + } + }; + Ok((clause, binds)) +} + +fn float_clause( + column: &str, + filter: &Filter, + allow_ranges: bool, +) -> Result<(String, Vec<SqlBindValue>)> { + let mut binds = Vec::new(); + let value = filter + .value + .as_scalar()? 
+ .parse::<f64>() + .map_err(|_| ServiceError::InvalidRequest("invalid numeric value".into()))?; + let clause = match filter.op { + FilterOp::Eq => { + binds.push(SqlBindValue::Float(value)); + format!("{column} = ?") + } + FilterOp::NotEq => { + binds.push(SqlBindValue::Float(value)); + format!("{column} <> ?") + } + FilterOp::Gt if allow_ranges => { + binds.push(SqlBindValue::Float(value)); + format!("{column} > ?") + } + FilterOp::Gte if allow_ranges => { + binds.push(SqlBindValue::Float(value)); + format!("{column} >= ?") + } + FilterOp::Lt if allow_ranges => { + binds.push(SqlBindValue::Float(value)); + format!("{column} < ?") + } + FilterOp::Lte if allow_ranges => { + binds.push(SqlBindValue::Float(value)); + format!("{column} <= ?") + } + _ => { + return Err(ServiceError::InvalidRequest(format!( + "{column} filter does not support operator {:?}", + filter.op + ))) + } + }; + Ok((clause, binds)) +} + +fn int_clause( + column: &str, + filter: &Filter, + allow_ranges: bool, +) -> Result<(String, Vec<SqlBindValue>)> { + let mut binds = Vec::new(); + let value = filter + .value + .as_scalar()? + .parse::<i64>() + .map_err(|_| ServiceError::InvalidRequest("invalid integer value".into()))?; + + let clause = match filter.op { + FilterOp::Eq => { + binds.push(SqlBindValue::BigInt(value)); + format!("{column} = ?") + } + FilterOp::NotEq => { + binds.push(SqlBindValue::BigInt(value)); + format!("{column} <> ?") + } + FilterOp::Gt if allow_ranges => { + binds.push(SqlBindValue::BigInt(value)); + format!("{column} > ?") + } + FilterOp::Gte if allow_ranges => { + binds.push(SqlBindValue::BigInt(value)); + format!("{column} >= ?") + } + FilterOp::Lt if allow_ranges => { + binds.push(SqlBindValue::BigInt(value)); + format!("{column} < ?") + } + FilterOp::Lte if allow_ranges => { + binds.push(SqlBindValue::BigInt(value)); + format!("{column} <= ?") + } + _ => { + return Err(ServiceError::InvalidRequest(format!( + "{column} filter does not support operator {:?}", + filter.op + ))) + } + }; + + Ok((clause, binds)) +} + +fn timeseries_filter_clause(filter: &Filter) -> Result<(String, Vec<SqlBindValue>)> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "metric_name" | "metric_type" | "device_id" + | "target_device_ip" | "partition" => text_clause(filter.field.as_str(), filter), + "if_index" => int_clause("if_index", filter, false), + "value" => float_clause("value", filter, true), + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for downsample timeseries_metrics: '{other}'" + ))), + } +} + +fn cpu_filter_clause(filter: &Filter) -> Result<(String, Vec<SqlBindValue>)> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | "partition" | "cluster" | "label" => { + text_clause(filter.field.as_str(), filter) + } + "core_id" => int_clause("core_id", filter, false), + "usage_percent" => float_clause("usage_percent", filter, true), + "frequency_hz" => float_clause("frequency_hz", filter, true), + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for downsample cpu_metrics: '{other}'" + ))), + } +} + +fn memory_filter_clause(filter: &Filter) -> Result<(String, Vec<SqlBindValue>)> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | "partition" => { + text_clause(filter.field.as_str(), filter) + } + "usage_percent" => float_clause("usage_percent", filter, false), + "total_bytes" => int_clause("total_bytes", filter, false), + "used_bytes" => int_clause("used_bytes", 
filter, false), + "available_bytes" => int_clause("available_bytes", filter, false), + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for downsample memory_metrics: '{other}'" + ))), + } +} + +fn disk_filter_clause(filter: &Filter) -> Result<(String, Vec<SqlBindValue>)> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | "partition" | "mount_point" + | "device_name" => text_clause(filter.field.as_str(), filter), + "usage_percent" => float_clause("usage_percent", filter, false), + "total_bytes" => int_clause("total_bytes", filter, false), + "used_bytes" => int_clause("used_bytes", filter, false), + "available_bytes" => int_clause("available_bytes", filter, false), + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for downsample disk_metrics: '{other}'" + ))), + } +} + +fn process_filter_clause(filter: &Filter) -> Result<(String, Vec<SqlBindValue>)> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | "partition" | "name" | "status" + | "start_time" => text_clause(filter.field.as_str(), filter), + "pid" => int_clause("pid", filter, false), + "cpu_usage" => float_clause("cpu_usage", filter, true), + "memory_usage" => int_clause("memory_usage", filter, true), + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for downsample process_metrics: '{other}'" + ))), + } +} + +#[derive(Debug, QueryableByName)] +struct DownsampleRow { + #[diesel(sql_type = Timestamptz)] + timestamp: DateTime<Utc>, + #[diesel(sql_type = Nullable<Text>)] + series: Option<String>, + #[diesel(sql_type = Nullable<Float8>)] + value: Option<f64>, +} + +#[derive(Debug, Clone)] +enum SqlBindValue { + Text(String), + TextArray(Vec<String>), + Float(f64), + Timestamp(DateTime<Utc>), + BigInt(i64), +} + +impl SqlBindValue { + fn apply<'a>( + &self, + query: diesel::query_builder::BoxedSqlQuery<'a, Pg, diesel::query_builder::SqlQuery>, + ) -> diesel::query_builder::BoxedSqlQuery<'a, Pg, diesel::query_builder::SqlQuery> { + use diesel::sql_types::{Array, Float8, Int8, Text, Timestamptz}; + match self { + SqlBindValue::Text(value) => query.bind::<Text, _>(value.clone()), + SqlBindValue::TextArray(values) => query.bind::<Array<Text>, _>(values.clone()), + SqlBindValue::Float(value) => query.bind::<Float8, _>(*value), + SqlBindValue::Timestamp(value) => query.bind::<Timestamptz, _>(*value), + SqlBindValue::BigInt(value) => query.bind::<Int8, _>(*value), + } + } + + fn into_bind_param(self) -> BindParam { + match self { + SqlBindValue::Text(value) => BindParam::Text(value), + SqlBindValue::TextArray(values) => BindParam::TextArray(values), + SqlBindValue::Float(value) => BindParam::Float(value), + SqlBindValue::Timestamp(value) => BindParam::timestamptz(value), + SqlBindValue::BigInt(value) => BindParam::Int(value), + } + } +} + +fn rewrite_placeholders(sql: &str) -> String { + let mut result = String::with_capacity(sql.len()); + let mut index = 1; + for ch in sql.chars() { + if ch == '?' 
{ + result.push('$'); + result.push_str(&index.to_string()); + index += 1; + } else { + result.push(ch); + } + } + result +} diff --git a/rust/srql/src/query/events.rs b/rust/srql/src/query/events.rs index 601004cf6..157a115f1 100644 --- a/rust/srql/src/query/events.rs +++ b/rust/srql/src/query/events.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::EventRow, @@ -39,10 +39,35 @@ pub(super) async fn execute( Ok(rows.into_iter().map(EventRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -127,6 +152,48 @@ fn apply_filter<'a>(mut query: EventsQuery<'a>, filter: &Filter) -> Result<Event Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + crate::parser::FilterOp::Eq + | crate::parser::FilterOp::NotEq + | crate::parser::FilterOp::Like + | crate::parser::FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + crate::parser::FilterOp::In | crate::parser::FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "id" | "type" | "source" | "subject" | "datacontenttype" | "remote_addr" | "host" + | "specversion" | "severity" | "short_message" | "version" => { + collect_text_params(params, filter) + } + "level" => { + params.push(BindParam::Int(i64::from(parse_i32( + filter.value.as_scalar()?, + )?))); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for events: '{other}'" + ))), + } +} + fn apply_ordering<'a>(mut query: EventsQuery<'a>, order: &[OrderClause]) -> EventsQuery<'a> { let mut applied = false; for clause in order { diff --git a/rust/srql/src/query/graph_cypher.rs b/rust/srql/src/query/graph_cypher.rs new file mode 100644 index 000000000..504e39f90 --- /dev/null +++ b/rust/srql/src/query/graph_cypher.rs @@ -0,0 +1,208 @@ +use super::{BindParam, QueryPlan}; +use crate::{ + error::{Result, ServiceError}, + parser::{Entity, FilterOp}, +}; +use diesel::deserialize::QueryableByName; +use diesel::pg::Pg; +use 
diesel::sql_query; +use diesel::sql_types::{Jsonb, Nullable}; +use diesel_async::{AsyncPgConnection, RunQueryDsl}; +use serde_json::Value; + +const GRAPH_NAME: &str = "serviceradar"; + +pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> Result<Vec<Value>> { + ensure_entity(plan)?; + let cypher = extract_cypher(plan)?; + + let sql = build_sql(&cypher); + let mut query = sql_query(rewrite_placeholders(&sql)).into_boxed::<Pg>(); + query = query.bind::<diesel::sql_types::Int8, _>(plan.limit); + query = query.bind::<diesel::sql_types::Int8, _>(plan.offset); + + let rows: Vec<CypherRow> = query + .load(conn) + .await + .map_err(|err| ServiceError::Internal(err.into()))?; + + Ok(rows.into_iter().filter_map(|row| row.result).collect()) +} + +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { + ensure_entity(plan)?; + let cypher = extract_cypher(plan)?; + + Ok(( + rewrite_placeholders(&build_sql(&cypher)), + vec![BindParam::Int(plan.limit), BindParam::Int(plan.offset)], + )) +} + +fn ensure_entity(plan: &QueryPlan) -> Result<()> { + match plan.entity { + Entity::GraphCypher => Ok(()), + _ => Err(ServiceError::InvalidRequest( + "entity not supported by graph_cypher".into(), + )), + } +} + +fn build_sql(cypher: &str) -> String { + let cypher = dollar_quote(cypher); + + format!( + "WITH _config AS (\n SELECT set_config('search_path', 'ag_catalog,pg_catalog,\"$user\",public', false)\n),\n_rows AS (\n SELECT (result::text)::jsonb AS r\n FROM ag_catalog.cypher('{GRAPH_NAME}', {cypher}) AS (result ag_catalog.agtype)\n LIMIT ? OFFSET ?\n)\nSELECT\n CASE\n WHEN jsonb_typeof(r) = 'object' AND (jsonb_exists(r, 'nodes') OR jsonb_exists(r, 'vertices')) AND jsonb_exists(r, 'edges') THEN r\n WHEN jsonb_typeof(r) = 'object' AND (jsonb_exists(r, 'start_id') OR jsonb_exists(r, 'end_id')) THEN jsonb_build_object(\n 'nodes', jsonb_build_array(\n jsonb_build_object('id', r->>'start_id', 'label', r->>'start_id'),\n jsonb_build_object('id', r->>'end_id', 'label', r->>'end_id')\n ),\n 'edges', jsonb_build_array(r)\n )\n WHEN jsonb_typeof(r) = 'object' AND jsonb_exists(r, 'id') THEN jsonb_build_object('nodes', jsonb_build_array(r), 'edges', '[]'::jsonb)\n ELSE jsonb_build_object('nodes', '[]'::jsonb, 'edges', '[]'::jsonb, 'rows', jsonb_build_array(r))\n END AS result\nFROM _rows" + ) +} + +fn dollar_quote(input: &str) -> String { + let mut attempt = 0usize; + + loop { + let tag = if attempt == 0 { + "srql".to_string() + } else { + format!("srql_{attempt}") + }; + + let delimiter = format!("${tag}$"); + if !input.contains(&delimiter) { + return format!("{delimiter}{input}{delimiter}"); + } + + attempt += 1; + } +} + +fn extract_cypher(plan: &QueryPlan) -> Result<String> { + let filter = plan + .filters + .iter() + .find(|f| f.field == "cypher") + .ok_or_else(|| { + ServiceError::InvalidRequest("graph_cypher requires cypher:\"...\"".into()) + })?; + + if !matches!(filter.op, FilterOp::Eq) { + return Err(ServiceError::InvalidRequest( + "cypher filter only supports equality".into(), + )); + } + + let raw = filter.value.as_scalar()?.trim(); + if raw.is_empty() { + return Err(ServiceError::InvalidRequest( + "cypher query cannot be empty".into(), + )); + } + + ensure_read_only(raw)?; + Ok(raw.to_string()) +} + +fn ensure_read_only(raw: &str) -> Result<()> { + let lower = raw.to_lowercase(); + if lower.contains(';') { + return Err(ServiceError::InvalidRequest( + "cypher queries must not contain ';'".into(), + )); + } + + for keyword in [ + "create", "merge", "set", 
"delete", "detach", "remove", "drop", "call", + ] { + if lower + .split(|c: char| !c.is_ascii_alphanumeric() && c != '_') + .any(|token| token == keyword) + { + return Err(ServiceError::InvalidRequest(format!( + "cypher queries must be read-only (found '{keyword}')" + ))); + } + } + + Ok(()) +} + +#[derive(Debug, QueryableByName)] +struct CypherRow { + #[diesel(sql_type = Nullable<Jsonb>)] + result: Option<Value>, +} + +fn rewrite_placeholders(sql: &str) -> String { + let mut result = Vec::with_capacity(sql.len()); + let mut index = 1usize; + + let mut i = 0usize; + let bytes = sql.as_bytes(); + let mut in_single_quote = false; + let mut dollar_delimiter: Option<Vec<u8>> = None; + + while i < bytes.len() { + if let Some(delimiter) = &dollar_delimiter { + if bytes[i..].starts_with(delimiter) { + result.extend_from_slice(delimiter); + i += delimiter.len(); + dollar_delimiter = None; + continue; + } + + result.push(bytes[i]); + i += 1; + continue; + } + + if in_single_quote { + if bytes[i] == b'\'' { + if i + 1 < bytes.len() && bytes[i + 1] == b'\'' { + result.extend_from_slice(b"''"); + i += 2; + continue; + } + + in_single_quote = false; + result.push(b'\''); + i += 1; + continue; + } + + result.push(bytes[i]); + i += 1; + continue; + } + + match bytes[i] { + b'\'' => { + in_single_quote = true; + result.push(b'\''); + i += 1; + } + b'$' => { + if let Some(rel_end) = bytes[i + 1..].iter().position(|b| *b == b'$') { + let end = i + 1 + rel_end; + let delimiter = bytes[i..=end].to_vec(); + result.extend_from_slice(&delimiter); + i = end + 1; + dollar_delimiter = Some(delimiter); + } else { + result.push(b'$'); + i += 1; + } + } + b'?' => { + result.push(b'$'); + result.extend_from_slice(index.to_string().as_bytes()); + index += 1; + i += 1; + } + _ => { + result.push(bytes[i]); + i += 1; + } + } + } + + String::from_utf8(result).expect("rewritten SQL must be valid UTF-8") +} diff --git a/rust/srql/src/query/interfaces.rs b/rust/srql/src/query/interfaces.rs index fe6d790fe..82de40e38 100644 --- a/rust/srql/src/query/interfaces.rs +++ b/rust/srql/src/query/interfaces.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::DiscoveredInterfaceRow, @@ -56,17 +56,55 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R .collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - if parse_stats_spec(plan.stats.as_deref())?.is_some() { + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + if let Some(spec) = parse_stats_spec(plan.stats.as_deref())? 
{ let base = base_query(plan)?; - let sql = diesel::debug_query::<Pg, _>(&base.count()).to_string(); - return Ok(sql); + let count = base.count(); + let count_sql = super::diesel_sql(&count)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&count)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + let sql = format!("SELECT ({count_sql}) AS {}", spec.alias); + return Ok((sql, params)); } - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -221,6 +259,71 @@ fn apply_filter<'a>( Ok(query) } +fn collect_text_params( + params: &mut Vec<BindParam>, + filter: &Filter, + allow_lists: bool, +) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn if allow_lists => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => Err(ServiceError::InvalidRequest( + "list filters are not supported for this field".into(), + )), + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "device_id" | "poller_id" | "agent_id" | "if_name" | "if_descr" | "description" + | "if_alias" | "if_phys_address" | "mac" => collect_text_params(params, filter, true), + "device_ip" | "ip" => collect_text_params(params, filter, false), + "if_admin_status" | "if_oper_status" | "status" => { + params.push(BindParam::Int(i64::from(parse_i32( + filter.value.as_scalar()?, + )?))); + Ok(()) + } + "if_speed" | "speed" => { + params.push(BindParam::Int(parse_i64(filter.value.as_scalar()?)?)); + Ok(()) + } + "ip_addresses" | "ip_address" => { + let values: Vec<String> = match &filter.value { + FilterValue::Scalar(value) => vec![value.to_string()], + FilterValue::List(list) => list.clone(), + }; + if values.is_empty() { + return Ok(()); + } + if values.len() > MAX_IP_ADDRESS_FILTER_VALUES { + return Err(ServiceError::InvalidRequest(format!( + "ip_addresses filter supports at most {MAX_IP_ADDRESS_FILTER_VALUES} values" + ))); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field '{other}'" + ))), + } +} + fn apply_ordering<'a>( mut query: InterfacesQuery<'a>, order: &[OrderClause], @@ -361,7 +464,7 @@ mod tests { .expect("stats expected"); assert_eq!(spec.alias, "interface_count"); - let sql = to_debug_sql(&plan).expect("stats SQL should be generated"); + let (sql, _) = 
to_sql_and_params(&plan).expect("stats SQL should be generated"); assert!( sql.to_lowercase().contains("count("), "unexpected stats SQL: {}", @@ -387,6 +490,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: Some(stats.to_string()), + downsample: None, } } } diff --git a/rust/srql/src/query/logs.rs b/rust/srql/src/query/logs.rs index c70a1bd91..194bae58a 100644 --- a/rust/srql/src/query/logs.rs +++ b/rust/srql/src/query/logs.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::LogRow, @@ -52,10 +52,36 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(LogRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; + + if let Some(stats_sql) = build_stats_query(plan)? { + let sql = rewrite_placeholders(&stats_sql.sql); + let params = stats_sql + .binds + .into_iter() + .map(bind_param_from_stats) + .collect(); + return Ok((sql, params)); + } + let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let sql = super::diesel_sql(&query.limit(plan.limit).offset(plan.offset))?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + params.push(BindParam::Int(plan.limit)); + params.push(BindParam::Int(plan.offset)); + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -121,6 +147,77 @@ impl SqlBindValue { } } +fn bind_param_from_stats(value: SqlBindValue) -> BindParam { + match value { + SqlBindValue::Text(value) => BindParam::Text(value), + SqlBindValue::Int(value) => BindParam::Int(i64::from(value)), + SqlBindValue::Timestamp(value) => BindParam::timestamptz(value), + } +} + +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "trace_id" | "span_id" | "service_name" | "service_version" | "service_instance" + | "scope_name" | "scope_version" | "severity_text" | "severity" | "level" | "body" => { + collect_text_params(params, filter) + } + "severity_number" => match filter.op { + FilterOp::Eq | FilterOp::NotEq => { + let value = filter.value.as_scalar()?.parse::<i32>().map_err(|_| { + ServiceError::InvalidRequest("severity_number must be an integer".into()) + })?; + params.push(BindParam::Int(i64::from(value))); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values: Vec<i32> = filter + .value + .as_list()? 
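+                    // each listed severity_number must parse as an i32; values are widened to i64 for the array bind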
+ .iter() + .map(|v| v.parse::<i32>()) + .collect::<std::result::Result<Vec<_>, _>>() + .map_err(|_| { + ServiceError::InvalidRequest("severity_number list must be integers".into()) + })?; + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::IntArray( + values.into_iter().map(i64::from).collect(), + )); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest( + "severity_number filter does not support this operator".into(), + )), + }, + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for logs: '{other}'" + ))), + } +} + fn build_stats_query(plan: &QueryPlan) -> Result<Option<LogsStatsSql>> { let stats_raw = match plan.stats.as_ref() { Some(raw) if !raw.trim().is_empty() => raw.trim(), @@ -614,6 +711,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: Some(stats.to_string()), + downsample: None, } } @@ -633,6 +731,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan); @@ -664,6 +763,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: Some("count() as total".to_string()), + downsample: None, }; let result = build_stats_query(&plan); diff --git a/rust/srql/src/query/memory_metrics.rs b/rust/srql/src/query/memory_metrics.rs index dce80c3c0..0ae842f93 100644 --- a/rust/srql/src/query/memory_metrics.rs +++ b/rust/srql/src/query/memory_metrics.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::MemoryMetricRow, @@ -38,10 +38,35 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(MemoryMetricRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -135,6 +160,49 @@ fn apply_filter<'a>(mut query: MemoryQuery<'a>, filter: &Filter) -> Result<Memor Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + crate::parser::FilterOp::Eq + | crate::parser::FilterOp::NotEq + | crate::parser::FilterOp::Like + | crate::parser::FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + crate::parser::FilterOp::In | crate::parser::FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ 
=> Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | "partition" => { + collect_text_params(params, filter) + } + "usage_percent" => { + params.push(BindParam::Float(parse_f64(filter.value.as_scalar()?)?)); + Ok(()) + } + "total_bytes" | "used_bytes" | "available_bytes" => { + params.push(BindParam::Int(parse_i64(filter.value.as_scalar()?)?)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for memory_metrics: '{other}'" + ))), + } +} + fn apply_ordering<'a>(mut query: MemoryQuery<'a>, order: &[OrderClause]) -> MemoryQuery<'a> { let mut applied = false; for clause in order { @@ -239,6 +307,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan); diff --git a/rust/srql/src/query/mod.rs b/rust/srql/src/query/mod.rs index 310bd17f3..61ded64c7 100644 --- a/rust/srql/src/query/mod.rs +++ b/rust/srql/src/query/mod.rs @@ -96,7 +96,9 @@ mod device_graph; mod device_updates; mod devices; mod disk_metrics; +mod downsample; mod events; +mod graph_cypher; mod interfaces; mod logs; mod memory_metrics; @@ -107,6 +109,7 @@ mod services; mod timeseries_metrics; mod trace_summaries; mod traces; +mod viz; use crate::{ config::AppConfig, @@ -122,6 +125,24 @@ use serde_json::Value; use std::sync::Arc; use tracing::error; +#[derive(Debug, Clone, Serialize)] +#[serde(tag = "t", content = "v", rename_all = "snake_case")] +pub enum BindParam { + Text(String), + TextArray(Vec<String>), + IntArray(Vec<i64>), + Bool(bool), + Int(i64), + Float(f64), + Timestamptz(String), +} + +impl BindParam { + fn timestamptz(value: chrono::DateTime<Utc>) -> Self { + Self::Timestamptz(value.to_rfc3339()) + } +} + #[derive(Clone)] pub struct QueryEngine { pool: PgPool, @@ -145,25 +166,30 @@ impl QueryEngine { ServiceError::Internal(anyhow::anyhow!("{err:?}")) })?; - let results = match plan.entity { - Entity::Devices => devices::execute(&mut conn, &plan).await?, - Entity::DeviceUpdates => device_updates::execute(&mut conn, &plan).await?, - Entity::DeviceGraph => device_graph::execute(&mut conn, &plan).await?, - Entity::Events => events::execute(&mut conn, &plan).await?, - Entity::Interfaces => interfaces::execute(&mut conn, &plan).await?, - Entity::Logs => logs::execute(&mut conn, &plan).await?, - Entity::Pollers => pollers::execute(&mut conn, &plan).await?, - Entity::OtelMetrics => otel_metrics::execute(&mut conn, &plan).await?, - Entity::RperfMetrics | Entity::TimeseriesMetrics | Entity::SnmpMetrics => { - timeseries_metrics::execute(&mut conn, &plan).await? + let results = if plan.downsample.is_some() { + downsample::execute(&mut conn, &plan).await? 
+ } else { + match plan.entity { + Entity::Devices => devices::execute(&mut conn, &plan).await?, + Entity::DeviceUpdates => device_updates::execute(&mut conn, &plan).await?, + Entity::DeviceGraph => device_graph::execute(&mut conn, &plan).await?, + Entity::GraphCypher => graph_cypher::execute(&mut conn, &plan).await?, + Entity::Events => events::execute(&mut conn, &plan).await?, + Entity::Interfaces => interfaces::execute(&mut conn, &plan).await?, + Entity::Logs => logs::execute(&mut conn, &plan).await?, + Entity::Pollers => pollers::execute(&mut conn, &plan).await?, + Entity::OtelMetrics => otel_metrics::execute(&mut conn, &plan).await?, + Entity::RperfMetrics | Entity::TimeseriesMetrics | Entity::SnmpMetrics => { + timeseries_metrics::execute(&mut conn, &plan).await? + } + Entity::CpuMetrics => cpu_metrics::execute(&mut conn, &plan).await?, + Entity::MemoryMetrics => memory_metrics::execute(&mut conn, &plan).await?, + Entity::DiskMetrics => disk_metrics::execute(&mut conn, &plan).await?, + Entity::ProcessMetrics => process_metrics::execute(&mut conn, &plan).await?, + Entity::Services => services::execute(&mut conn, &plan).await?, + Entity::TraceSummaries => trace_summaries::execute(&mut conn, &plan).await?, + Entity::Traces => traces::execute(&mut conn, &plan).await?, } - Entity::CpuMetrics => cpu_metrics::execute(&mut conn, &plan).await?, - Entity::MemoryMetrics => memory_metrics::execute(&mut conn, &plan).await?, - Entity::DiskMetrics => disk_metrics::execute(&mut conn, &plan).await?, - Entity::ProcessMetrics => process_metrics::execute(&mut conn, &plan).await?, - Entity::Services => services::execute(&mut conn, &plan).await?, - Entity::TraceSummaries => trace_summaries::execute(&mut conn, &plan).await?, - Entity::Traces => traces::execute(&mut conn, &plan).await?, }; let pagination = self.build_pagination(&plan, results.len() as i64); @@ -175,41 +201,7 @@ impl QueryEngine { } pub async fn translate(&self, request: TranslateRequest) -> Result<TranslateResponse> { - let ast = parser::parse(&request.query)?; - let synthetic = QueryRequest { - query: request.query.clone(), - limit: None, - cursor: None, - direction: QueryDirection::Next, - mode: None, - }; - let plan = build_query_plan(&self.config, &synthetic, ast)?; - - let sql = match plan.entity { - Entity::Devices => devices::to_debug_sql(&plan)?, - Entity::DeviceUpdates => device_updates::to_debug_sql(&plan)?, - Entity::DeviceGraph => device_graph::to_debug_sql(&plan)?, - Entity::Events => events::to_debug_sql(&plan)?, - Entity::Interfaces => interfaces::to_debug_sql(&plan)?, - Entity::Logs => logs::to_debug_sql(&plan)?, - Entity::Pollers => pollers::to_debug_sql(&plan)?, - Entity::OtelMetrics => otel_metrics::to_debug_sql(&plan)?, - Entity::RperfMetrics | Entity::TimeseriesMetrics | Entity::SnmpMetrics => { - timeseries_metrics::to_debug_sql(&plan)? 
- } - Entity::CpuMetrics => cpu_metrics::to_debug_sql(&plan)?, - Entity::MemoryMetrics => memory_metrics::to_debug_sql(&plan)?, - Entity::DiskMetrics => disk_metrics::to_debug_sql(&plan)?, - Entity::ProcessMetrics => process_metrics::to_debug_sql(&plan)?, - Entity::Services => services::to_debug_sql(&plan)?, - Entity::TraceSummaries => trace_summaries::to_debug_sql(&plan)?, - Entity::Traces => traces::to_debug_sql(&plan)?, - }; - - Ok(TranslateResponse { - sql, - params: Vec::new(), - }) + translate_request(self.config(), QueryRequest::from(request)) } fn build_pagination(&self, plan: &QueryPlan, fetched: i64) -> PaginationMeta { @@ -260,6 +252,7 @@ fn build_query_plan( offset, time_range, stats: ast.stats, + downsample: ast.downsample, }) } @@ -269,6 +262,250 @@ fn determine_limit(config: &AppConfig, candidate: Option<i64>) -> i64 { candidate.unwrap_or(default).clamp(1, max) } +pub(super) fn max_dollar_placeholder(sql: &str) -> usize { + let bytes = sql.as_bytes(); + let mut max = 0usize; + let mut i = 0usize; + + while i < bytes.len() { + if bytes[i] != b'$' { + i += 1; + continue; + } + + i += 1; + if i >= bytes.len() || !bytes[i].is_ascii_digit() { + continue; + } + + let mut value = 0usize; + while i < bytes.len() && bytes[i].is_ascii_digit() { + value = value * 10 + (bytes[i] - b'0') as usize; + i += 1; + } + + max = max.max(value); + } + + max +} + +pub(super) fn reconcile_limit_offset_binds( + sql: &str, + params: &mut Vec<BindParam>, + limit: i64, + offset: i64, +) -> Result<()> { + let expected = max_dollar_placeholder(sql); + let current = params.len(); + if expected < current { + return Err(ServiceError::Internal(anyhow::anyhow!( + "sql expects {expected} binds but {current} were collected" + ))); + } + + match expected.saturating_sub(current) { + 0 => Ok(()), + 1 => { + params.push(BindParam::Int(limit)); + Ok(()) + } + 2 => { + params.push(BindParam::Int(limit)); + params.push(BindParam::Int(offset)); + Ok(()) + } + extra => Err(ServiceError::Internal(anyhow::anyhow!( + "unexpected bind arity gap: {extra}" + ))), + } +} + +pub(super) fn diesel_sql<T>(query: &T) -> Result<String> +where + T: diesel::query_builder::QueryFragment<diesel::pg::Pg>, +{ + use diesel::query_builder::QueryBuilder as _; + + let backend = diesel::pg::Pg; + let mut query_builder = <diesel::pg::Pg as diesel::backend::Backend>::QueryBuilder::default(); + diesel::query_builder::QueryFragment::<diesel::pg::Pg>::to_sql( + query, + &mut query_builder, + &backend, + ) + .map_err(|err| { + error!(error = ?err, "failed to serialize diesel SQL"); + ServiceError::Internal(anyhow::anyhow!("failed to serialize SQL")) + })?; + + Ok(query_builder.finish()) +} + +#[cfg(any(test, debug_assertions))] +pub(super) fn diesel_bind_count<T>(query: &T) -> Result<usize> +where + T: diesel::query_builder::QueryFragment<diesel::pg::Pg>, +{ + let rendered = diesel::debug_query::<diesel::pg::Pg, _>(query).to_string(); + let marker = "-- binds:"; + let binds = rendered + .split_once(marker) + .map(|(_, suffix)| suffix.trim()) + .ok_or_else(|| ServiceError::Internal(anyhow::anyhow!("missing binds marker")))?; + + count_debug_binds_list(binds).ok_or_else(|| { + ServiceError::Internal(anyhow::anyhow!("failed to parse diesel debug bind list")) + }) +} + +#[cfg(any(test, debug_assertions))] +fn count_debug_binds_list(binds: &str) -> Option<usize> { + let bytes = binds.as_bytes(); + let mut i = 0usize; + while i < bytes.len() && bytes[i].is_ascii_whitespace() { + i += 1; + } + + if i >= bytes.len() || bytes[i] != b'[' { + return None; + } 
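+    // Counts the top-level entries of diesel's debug bind list (e.g. `[1, "abc", [2, 3]]`),
+    // skipping over quoted strings and nested (), [], {} so each bind is counted exactly once.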
+ + let mut bracket_depth = 0i32; + let mut paren_depth = 0i32; + let mut brace_depth = 0i32; + let mut in_string = false; + let mut escape = false; + let mut in_item = false; + let mut count = 0usize; + + for &b in bytes[i..].iter() { + if in_string { + if escape { + escape = false; + continue; + } + + if b == b'\\' { + escape = true; + continue; + } + + if b == b'"' { + in_string = false; + } + + continue; + } + + match b { + b'"' => { + if bracket_depth == 1 && !in_item && brace_depth == 0 && paren_depth == 0 { + in_item = true; + } + in_string = true; + } + b'[' => { + if bracket_depth == 1 && !in_item && brace_depth == 0 && paren_depth == 0 { + in_item = true; + } + bracket_depth += 1; + if bracket_depth == 1 { + in_item = false; + } + } + b']' => { + if bracket_depth == 1 && in_item { + count += 1; + in_item = false; + } + bracket_depth -= 1; + if bracket_depth <= 0 { + break; + } + } + b'{' => { + if bracket_depth == 1 && !in_item && brace_depth == 0 && paren_depth == 0 { + in_item = true; + } + brace_depth += 1 + } + b'}' => brace_depth -= 1, + b'(' => { + if bracket_depth == 1 && !in_item && brace_depth == 0 && paren_depth == 0 { + in_item = true; + } + paren_depth += 1 + } + b')' => paren_depth -= 1, + b',' => { + if bracket_depth == 1 && brace_depth == 0 && paren_depth == 0 && in_item { + count += 1; + in_item = false; + } + } + b if b.is_ascii_whitespace() => {} + _ => { + if bracket_depth == 1 && !in_item { + in_item = true; + } + } + } + } + + Some(count) +} + +pub fn translate_request(config: &AppConfig, request: QueryRequest) -> Result<TranslateResponse> { + let ast = parser::parse(&request.query)?; + let plan = build_query_plan(config, &request, ast)?; + let viz = viz::meta_for_plan(&plan); + + let (sql, params) = if plan.downsample.is_some() { + downsample::to_sql_and_params(&plan)? + } else { + match plan.entity { + Entity::Devices => devices::to_sql_and_params(&plan)?, + Entity::DeviceUpdates => device_updates::to_sql_and_params(&plan)?, + Entity::DeviceGraph => device_graph::to_sql_and_params(&plan)?, + Entity::GraphCypher => graph_cypher::to_sql_and_params(&plan)?, + Entity::Events => events::to_sql_and_params(&plan)?, + Entity::Interfaces => interfaces::to_sql_and_params(&plan)?, + Entity::Logs => logs::to_sql_and_params(&plan)?, + Entity::Pollers => pollers::to_sql_and_params(&plan)?, + Entity::OtelMetrics => otel_metrics::to_sql_and_params(&plan)?, + Entity::RperfMetrics | Entity::TimeseriesMetrics | Entity::SnmpMetrics => { + timeseries_metrics::to_sql_and_params(&plan)? 
+ } + Entity::CpuMetrics => cpu_metrics::to_sql_and_params(&plan)?, + Entity::MemoryMetrics => memory_metrics::to_sql_and_params(&plan)?, + Entity::DiskMetrics => disk_metrics::to_sql_and_params(&plan)?, + Entity::ProcessMetrics => process_metrics::to_sql_and_params(&plan)?, + Entity::Services => services::to_sql_and_params(&plan)?, + Entity::TraceSummaries => trace_summaries::to_sql_and_params(&plan)?, + Entity::Traces => traces::to_sql_and_params(&plan)?, + } + }; + + let next_cursor = Some(encode_cursor(plan.offset.saturating_add(plan.limit))); + let prev_cursor = if plan.offset > 0 { + Some(encode_cursor(plan.offset.saturating_sub(plan.limit))) + } else { + None + }; + + Ok(TranslateResponse { + sql, + params, + pagination: PaginationMeta { + next_cursor, + prev_cursor, + limit: Some(plan.limit), + }, + viz, + }) +} + #[cfg(test)] mod tests { use super::{devices, interfaces, pollers, *}; @@ -290,7 +527,7 @@ mod tests { assert!(matches!(plan.order[0].direction, OrderDirection::Desc)); assert!(has_availability_filter(&plan, true)); - let sql = devices::to_debug_sql(&plan).expect("should build SQL for docs query"); + let (sql, _) = devices::to_sql_and_params(&plan).expect("should build SQL for docs query"); assert!( sql.to_lowercase() .contains("\"unified_devices\".\"is_available\" = $3"), @@ -308,7 +545,7 @@ mod tests { assert!(plan.time_range.is_some()); assert!(has_availability_filter(&plan, false)); - let sql = devices::to_debug_sql(&plan).expect("should build SQL for docs query"); + let (sql, _) = devices::to_sql_and_params(&plan).expect("should build SQL for docs query"); assert!( sql.to_lowercase() .contains("\"unified_devices\".\"is_available\" = $3"), @@ -392,7 +629,7 @@ mod tests { assert!(matches!(plan.entity, Entity::Interfaces)); assert_eq!(plan.limit, 5); - let sql = interfaces::to_debug_sql(&plan).expect("should build interfaces SQL"); + let (sql, _) = interfaces::to_sql_and_params(&plan).expect("should build interfaces SQL"); let lower = sql.to_lowercase(); assert!( lower.contains("coalesce(") && lower.contains("ip_addresses"), @@ -409,7 +646,7 @@ mod tests { assert!(matches!(plan.entity, Entity::Pollers)); assert_eq!(plan.limit, 10); assert_eq!(plan.order[0].field, "agent_count"); - let sql = pollers::to_debug_sql(&plan).expect("should build pollers SQL"); + let (sql, _) = pollers::to_sql_and_params(&plan).expect("should build pollers SQL"); let lower = sql.to_lowercase(); assert!( lower.contains("\"pollers\".\"is_healthy\" =") @@ -449,6 +686,8 @@ mod tests { database_url: "postgres://example/db".to_string(), max_pool_size: 1, pg_ssl_root_cert: None, + pg_ssl_cert: None, + pg_ssl_key: None, api_key: None, api_key_kv_key: None, allowed_origins: None, @@ -459,6 +698,208 @@ mod tests { rate_limit_window: StdDuration::from_secs(60), } } + + #[test] + fn translate_param_arity_matches_sql_placeholders() { + let config = test_config(); + + let cursor = encode_cursor(250); + + let cases = [ + QueryRequest { + query: "in:devices stats:count() as total".to_string(), + limit: None, + cursor: None, + direction: QueryDirection::Next, + mode: None, + }, + QueryRequest { + query: "in:services available:false time:last_24h stats:count() as failing" + .to_string(), + limit: None, + cursor: None, + direction: QueryDirection::Next, + mode: None, + }, + QueryRequest { + query: "in:pollers is_healthy:true status:ready sort:agent_count:desc".to_string(), + limit: Some(10), + cursor: None, + direction: QueryDirection::Next, + mode: None, + }, + QueryRequest { + query: "in:devices time:last_7d 
sort:last_seen:desc is_available:true discovery_sources:(sweep,armis)".to_string(), + limit: Some(20), + cursor: Some(cursor.clone()), + direction: QueryDirection::Next, + mode: None, + }, + QueryRequest { + query: "in:interfaces time:last_24h ip_addresses:(10.0.0.1,10.0.0.2) sort:timestamp:asc".to_string(), + limit: Some(5), + cursor: None, + direction: QueryDirection::Next, + mode: None, + }, + QueryRequest { + query: "in:traces time:last_24h status_code:(1,2) kind:(1,2,3) sort:timestamp:desc".to_string(), + limit: Some(25), + cursor: None, + direction: QueryDirection::Next, + mode: None, + }, + QueryRequest { + query: "in:device_graph device_id:dev-1 collector_owned_only:true include_topology:false".to_string(), + limit: None, + cursor: None, + direction: QueryDirection::Next, + mode: None, + }, + ]; + + for request in cases { + let response = match translate_request(&config, request.clone()) { + Ok(response) => response, + Err(err) => { + panic!("translation failed for query '{}': {err:?}", request.query) + } + }; + let max_placeholder = super::max_dollar_placeholder(&response.sql); + assert_eq!( + max_placeholder, + response.params.len(), + "sql placeholders must match params length\nsql: {}\nparams: {:?}", + response.sql, + response.params + ); + } + } + + #[test] + fn translate_includes_visualization_metadata() { + let config = crate::config::AppConfig::embedded("postgres://unused/db".to_string()); + let request = QueryRequest { + query: "in:timeseries_metrics time:last_7d limit:10".to_string(), + limit: None, + cursor: None, + direction: QueryDirection::Next, + mode: None, + }; + + let response = translate_request(&config, request).expect("translation should succeed"); + let viz = response.viz.expect("viz metadata should be present"); + + assert!( + viz.columns.iter().any(|col| { + col.name == "timestamp" && matches!(col.col_type, viz::ColumnType::Timestamptz) + }), + "expected timestamp column meta, got: {:?}", + viz.columns + ); + + assert!( + viz.suggestions + .iter() + .any(|s| matches!(s.kind, viz::VizKind::Timeseries)), + "expected timeseries suggestion, got: {:?}", + viz.suggestions + ); + } + + #[test] + fn translate_downsample_emits_time_bucket_query() { + let config = crate::config::AppConfig::embedded("postgres://unused/db".to_string()); + let request = QueryRequest { + query: + "in:timeseries_metrics time:last_7d bucket:5m agg:avg series:metric_name limit:25" + .to_string(), + limit: None, + cursor: None, + direction: QueryDirection::Next, + mode: None, + }; + + let response = translate_request(&config, request).expect("translation should succeed"); + + assert!( + response.sql.to_lowercase().contains("time_bucket("), + "expected time_bucket in SQL, got: {}", + response.sql + ); + assert!( + response.sql.to_lowercase().contains("group by 1, 2"), + "expected group by bucket+series, got: {}", + response.sql + ); + + let viz = response.viz.expect("viz metadata should be present"); + assert_eq!(viz.columns.len(), 3); + assert!( + viz.suggestions + .iter() + .any(|s| matches!(s.kind, viz::VizKind::Timeseries)), + "expected timeseries suggestion, got: {:?}", + viz.suggestions + ); + + assert!( + response.params.len() >= 4, + "expected time range + limit/offset params, got: {:?}", + response.params + ); + } + + #[test] + fn translate_graph_cypher_rejects_mutations() { + let config = crate::config::AppConfig::embedded("postgres://unused/db".to_string()); + let request = QueryRequest { + query: "in:graph_cypher cypher:\"CREATE (n:Device {id:'x'}) RETURN 1 as result\"" + 
.to_string(), + limit: None, + cursor: None, + direction: QueryDirection::Next, + mode: None, + }; + + let err = translate_request(&config, request).expect_err("should reject write cypher"); + assert!( + err.to_string().to_lowercase().contains("read-only"), + "expected read-only error, got: {err}" + ); + } + + #[test] + fn translate_graph_cypher_wraps_rows_as_topology_payload() { + let config = crate::config::AppConfig::embedded("postgres://unused/db".to_string()); + let request = QueryRequest { + query: "in:graph_cypher cypher:\"MATCH (n) RETURN n\" limit:10".to_string(), + limit: None, + cursor: None, + direction: QueryDirection::Next, + mode: None, + }; + + let response = translate_request(&config, request).expect("translation should succeed"); + let sql = response.sql.to_lowercase(); + + assert!( + sql.contains("jsonb_build_object('nodes'"), + "expected topology wrapper in SQL, got: {}", + response.sql + ); + assert!( + sql.contains("jsonb_build_array"), + "expected jsonb_build_array in SQL, got: {}", + response.sql + ); + assert_eq!( + response.params.len(), + 2, + "expected limit + offset binds, got: {:?}", + response.params + ); + } } #[derive(Debug, Clone)] @@ -470,6 +911,7 @@ pub struct QueryPlan { pub offset: i64, pub time_range: Option<TimeRange>, pub stats: Option<String>, + pub downsample: Option<crate::parser::DownsampleSpec>, } #[derive(Debug, Clone, Deserialize, Serialize, Default)] @@ -496,6 +938,26 @@ pub struct QueryRequest { #[derive(Debug, Clone, Deserialize, Serialize)] pub struct TranslateRequest { pub query: String, + #[serde(default)] + pub limit: Option<i64>, + #[serde(default)] + pub cursor: Option<String>, + #[serde(default)] + pub direction: QueryDirection, + #[serde(default)] + pub mode: Option<String>, +} + +impl From<TranslateRequest> for QueryRequest { + fn from(request: TranslateRequest) -> Self { + Self { + query: request.query, + limit: request.limit, + cursor: request.cursor, + direction: request.direction, + mode: request.mode, + } + } } #[derive(Debug, Clone, Serialize, Default)] @@ -517,5 +979,8 @@ pub struct QueryResponse { pub struct TranslateResponse { pub sql: String, #[serde(skip_serializing_if = "Vec::is_empty", default)] - pub params: Vec<String>, + pub params: Vec<BindParam>, + pub pagination: PaginationMeta, + #[serde(skip_serializing_if = "Option::is_none")] + pub viz: Option<viz::VizMeta>, } diff --git a/rust/srql/src/query/otel_metrics.rs b/rust/srql/src/query/otel_metrics.rs index c4fe994b0..d83c72998 100644 --- a/rust/srql/src/query/otel_metrics.rs +++ b/rust/srql/src/query/otel_metrics.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::OtelMetricRow, @@ -52,14 +52,46 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(OtelMetricRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; + if let Some(stats_sql) = build_stats_query(plan)? 
{ - return Ok(stats_sql.sql); + let sql = rewrite_placeholders(&stats_sql.sql); + let params = stats_sql + .binds + .into_iter() + .map(bind_param_from_stats) + .collect(); + return Ok((sql, params)); } - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -86,6 +118,44 @@ fn build_query(plan: &QueryPlan) -> Result<MetricsQuery<'static>> { Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "trace_id" | "span_id" | "service_name" | "service" | "span_name" | "span_kind" + | "metric_type" | "type" | "component" | "level" | "http_method" | "http_route" + | "http_status_code" | "grpc_service" | "grpc_method" | "grpc_status_code" => { + collect_text_params(params, filter) + } + "is_slow" => { + params.push(BindParam::Bool(parse_bool(filter.value.as_scalar()?)?)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for otel_metrics: '{other}'" + ))), + } +} + fn apply_filter<'a>(mut query: MetricsQuery<'a>, filter: &Filter) -> Result<MetricsQuery<'a>> { match filter.field.as_str() { "trace_id" => { @@ -243,6 +313,15 @@ impl SqlBindValue { } } +fn bind_param_from_stats(value: SqlBindValue) -> BindParam { + match value { + SqlBindValue::Text(value) => BindParam::Text(value), + SqlBindValue::TextArray(values) => BindParam::TextArray(values), + SqlBindValue::Bool(value) => BindParam::Bool(value), + SqlBindValue::Timestamp(value) => BindParam::timestamptz(value), + } +} + #[derive(Debug, QueryableByName)] struct MetricsStatsPayload { #[diesel(sql_type = Nullable<Jsonb>)] @@ -526,6 +605,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan); diff --git a/rust/srql/src/query/pollers.rs b/rust/srql/src/query/pollers.rs index 661ba3d08..3fae19c9a 100644 --- a/rust/srql/src/query/pollers.rs +++ b/rust/srql/src/query/pollers.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, 
models::PollerRow, @@ -38,10 +38,36 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(PollerRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -110,6 +136,46 @@ fn apply_filter<'a>(mut query: PollersQuery<'a>, filter: &Filter) -> Result<Poll Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "poller_id" + | "status" + | "component_id" + | "registration_source" + | "spiffe_identity" + | "created_by" => collect_text_params(params, filter), + "is_healthy" => { + let value = parse_bool(filter.value.as_scalar()?)?; + params.push(BindParam::Bool(value)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field '{other}'" + ))), + } +} + fn apply_ordering<'a>(mut query: PollersQuery<'a>, order: &[OrderClause]) -> PollersQuery<'a> { let mut applied = false; for clause in order { @@ -128,6 +194,16 @@ fn apply_ordering<'a>(mut query: PollersQuery<'a>, order: &[OrderClause]) -> Pol query } +fn parse_bool(raw: &str) -> Result<bool> { + match raw.to_lowercase().as_str() { + "true" | "1" | "yes" => Ok(true), + "false" | "0" | "no" => Ok(false), + _ => Err(ServiceError::InvalidRequest(format!( + "invalid boolean value '{raw}'" + ))), + } +} + fn apply_single_order<'a>( query: PollersQuery<'a>, field: &str, @@ -212,16 +288,6 @@ fn apply_secondary_order<'a>( } } -fn parse_bool(raw: &str) -> Result<bool> { - match raw.to_lowercase().as_str() { - "true" | "1" | "yes" => Ok(true), - "false" | "0" | "no" => Ok(false), - other => Err(ServiceError::InvalidRequest(format!( - "invalid boolean value '{other}'" - ))), - } -} - #[cfg(test)] mod tests { use super::*; @@ -244,6 +310,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, 
}; let result = build_query(&plan); diff --git a/rust/srql/src/query/process_metrics.rs b/rust/srql/src/query/process_metrics.rs index 58a33b579..2484117b7 100644 --- a/rust/srql/src/query/process_metrics.rs +++ b/rust/srql/src/query/process_metrics.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::ProcessMetricRow, @@ -37,10 +37,35 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(ProcessMetricRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -67,6 +92,53 @@ fn build_query(plan: &QueryPlan) -> Result<ProcessQuery<'static>> { Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "host_id" | "device_id" | "partition" | "name" | "status" + | "start_time" => collect_text_params(params, filter), + "pid" => { + params.push(BindParam::Int(i64::from(parse_i32( + filter.value.as_scalar()?, + )?))); + Ok(()) + } + "cpu_usage" => { + params.push(BindParam::Float(f64::from(parse_f32( + filter.value.as_scalar()?, + )?))); + Ok(()) + } + "memory_usage" => { + params.push(BindParam::Int(parse_i64(filter.value.as_scalar()?)?)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for process_metrics: '{other}'" + ))), + } +} + fn apply_filter<'a>(mut query: ProcessQuery<'a>, filter: &Filter) -> Result<ProcessQuery<'a>> { match filter.field.as_str() { "poller_id" => { @@ -253,6 +325,7 @@ mod tests { end: Utc::now() + ChronoDuration::minutes(1), }), stats: None, + downsample: None, }; let err = build_query(&plan).err().expect("expected error"); diff --git a/rust/srql/src/query/services.rs b/rust/srql/src/query/services.rs index b8b479b68..c99ab57fe 100644 --- 
a/rust/srql/src/query/services.rs +++ b/rust/srql/src/query/services.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::ServiceStatusRow, @@ -13,6 +13,7 @@ use crate::{ use diesel::pg::Pg; use diesel::prelude::*; use diesel::query_builder::{AsQuery, BoxedSelectStatement, FromClause}; +use diesel::sql_types::BigInt; use diesel::PgTextExpressionMethods; use diesel_async::{AsyncPgConnection, RunQueryDsl}; @@ -22,9 +23,21 @@ type ServiceStatusTable = crate::schema::service_status::table; type ServiceStatusFromClause = FromClause<ServiceStatusTable>; type ServicesQuery<'a> = BoxedSelectStatement<'a, <ServiceStatusTable as AsQuery>::SqlType, ServiceStatusFromClause, Pg>; +type ServicesStatsQuery<'a> = BoxedSelectStatement<'a, BigInt, ServiceStatusFromClause, Pg>; pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> Result<Vec<Value>> { ensure_entity(plan)?; + + if let Some(spec) = parse_stats_spec(plan.stats.as_deref())? { + let query = build_stats_query(plan, &spec)?; + let values: Vec<i64> = query + .load(conn) + .await + .map_err(|err| ServiceError::Internal(err.into()))?; + let count = values.into_iter().next().unwrap_or(0); + return Ok(vec![serde_json::json!({ spec.alias: count })]); + } + let query = build_query(plan)?; let rows: Vec<ServiceStatusRow> = query .limit(plan.limit) @@ -36,10 +49,63 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R Ok(rows.into_iter().map(ServiceStatusRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + if let Some(spec) = parse_stats_spec(plan.stats.as_deref())? 
{ + let query = build_stats_query(plan, &spec)?; + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + return Ok((sql, params)); + } + + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -66,6 +132,72 @@ fn build_query(plan: &QueryPlan) -> Result<ServicesQuery<'static>> { Ok(query) } +#[derive(Debug, Clone)] +struct ServicesStatsSpec { + alias: String, +} + +fn parse_stats_spec(raw: Option<&str>) -> Result<Option<ServicesStatsSpec>> { + let raw = match raw { + Some(raw) if !raw.trim().is_empty() => raw.trim(), + _ => return Ok(None), + }; + + let tokens: Vec<&str> = raw.split_whitespace().collect(); + if tokens.len() < 3 { + return Err(ServiceError::InvalidRequest( + "stats expressions must be of the form 'count() as alias'".into(), + )); + } + + if !tokens[0].eq_ignore_ascii_case("count()") || !tokens[1].eq_ignore_ascii_case("as") { + return Err(ServiceError::InvalidRequest( + "services stats only support count()".into(), + )); + } + + let alias = tokens[2] + .trim_matches('"') + .trim_matches('\'') + .to_lowercase(); + + if alias.is_empty() + || alias + .chars() + .any(|ch| !ch.is_ascii_alphanumeric() && ch != '_') + { + return Err(ServiceError::InvalidRequest( + "stats alias must be alphanumeric".into(), + )); + } + + if tokens.len() > 3 { + return Err(ServiceError::InvalidRequest( + "services stats do not support grouping yet".into(), + )); + } + + Ok(Some(ServicesStatsSpec { alias })) +} + +fn build_stats_query( + plan: &QueryPlan, + spec: &ServicesStatsSpec, +) -> Result<ServicesStatsQuery<'static>> { + let mut query = service_status.into_boxed::<Pg>(); + + if let Some(TimeRange { start, end }) = &plan.time_range { + query = query.filter(col_timestamp.ge(*start).and(col_timestamp.le(*end))); + } + + for filter in &plan.filters { + query = apply_filter(query, filter)?; + } + + let select_sql = format!("coalesce(COUNT(*), 0) as {}", spec.alias); + Ok(query.select(diesel::dsl::sql::<BigInt>(&select_sql))) +} + fn apply_filter<'a>(mut query: ServicesQuery<'a>, filter: &Filter) -> Result<ServicesQuery<'a>> { match filter.field.as_str() { "service_name" | "name" => { @@ -112,6 +244,41 @@ fn apply_filter<'a>(mut query: ServicesQuery<'a>, filter: &Filter) -> Result<Ser Ok(query) } +fn 
collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "service_name" | "name" | "service_type" | "type" | "poller_id" | "agent_id" + | "partition" | "message" => collect_text_params(params, filter), + "available" => { + params.push(BindParam::Bool(parse_bool(filter.value.as_scalar()?)?)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for services: '{other}'" + ))), + } +} + fn apply_ordering<'a>(mut query: ServicesQuery<'a>, order: &[OrderClause]) -> ServicesQuery<'a> { let mut applied = false; for clause in order { @@ -190,6 +357,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan); diff --git a/rust/srql/src/query/timeseries_metrics.rs b/rust/srql/src/query/timeseries_metrics.rs index bf78253dd..c22266360 100644 --- a/rust/srql/src/query/timeseries_metrics.rs +++ b/rust/srql/src/query/timeseries_metrics.rs @@ -1,6 +1,6 @@ //! SRQL support for timeseries-backed metrics (generic, SNMP, and rperf). -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::TimeseriesMetricRow, @@ -50,10 +50,42 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R .collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { let scope = ensure_entity(plan)?; - let query = build_query(plan, scope)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan, scope)? 
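+        // limit/offset are bound by diesel as trailing $ placeholders; their values are
+        // appended after the filter params via reconcile_limit_offset_binds below.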
+ .limit(plan.limit) + .offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + + if let MetricScope::Forced(metric_type) = scope { + params.push(BindParam::Text(metric_type.to_string())); + } + + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<MetricScope<'static>> { @@ -127,6 +159,51 @@ fn apply_filter<'a>( Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "poller_id" | "agent_id" | "metric_name" | "metric_type" | "device_id" + | "target_device_ip" | "partition" => collect_text_params(params, filter), + "if_index" => { + let value = filter + .value + .as_scalar()? 
+ .parse::<i32>() + .map_err(|_| ServiceError::InvalidRequest("invalid if_index value".into()))?; + params.push(BindParam::Int(i64::from(value))); + Ok(()) + } + "value" => { + let value = parse_f64(filter.value.as_scalar()?)?; + params.push(BindParam::Float(value)); + Ok(()) + } + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for timeseries_metrics: '{other}'" + ))), + } +} + fn apply_if_index_filter<'a>( query: TimeseriesQuery<'a>, filter: &Filter, @@ -288,6 +365,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan, MetricScope::Any); diff --git a/rust/srql/src/query/trace_summaries.rs b/rust/srql/src/query/trace_summaries.rs index a4d2fc266..6d9c37ba9 100644 --- a/rust/srql/src/query/trace_summaries.rs +++ b/rust/srql/src/query/trace_summaries.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::TraceSummaryRow, @@ -61,10 +61,16 @@ pub(super) async fn execute(conn: &mut AsyncPgConnection, plan: &QueryPlan) -> R } } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; let summary_query = build_summary_query(plan)?; - Ok(summary_query.sql) + let sql = rewrite_placeholders(&summary_query.sql); + let params = summary_query + .binds + .into_iter() + .map(bind_param_from_query) + .collect(); + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -122,6 +128,17 @@ impl SqlBindValue { } } +fn bind_param_from_query(value: SqlBindValue) -> BindParam { + match value { + SqlBindValue::Text(value) => BindParam::Text(value), + SqlBindValue::TextArray(values) => BindParam::TextArray(values), + SqlBindValue::Int(value) => BindParam::Int(i64::from(value)), + SqlBindValue::BigInt(value) => BindParam::Int(value), + SqlBindValue::Float(value) => BindParam::Float(value), + SqlBindValue::Timestamp(value) => BindParam::timestamptz(value), + } +} + #[derive(Debug, QueryableByName)] struct TraceStatsPayload { #[diesel(sql_type = Nullable<Jsonb>)] diff --git a/rust/srql/src/query/traces.rs b/rust/srql/src/query/traces.rs index 6e10571ba..d755a02f3 100644 --- a/rust/srql/src/query/traces.rs +++ b/rust/srql/src/query/traces.rs @@ -1,4 +1,4 @@ -use super::QueryPlan; +use super::{BindParam, QueryPlan}; use crate::{ error::{Result, ServiceError}, models::TraceSpanRow, @@ -40,10 +40,35 @@ pub(super) async fn execute( Ok(rows.into_iter().map(TraceSpanRow::into_json).collect()) } -pub(super) fn to_debug_sql(plan: &QueryPlan) -> Result<String> { +pub(super) fn to_sql_and_params(plan: &QueryPlan) -> Result<(String, Vec<BindParam>)> { ensure_entity(plan)?; - let query = build_query(plan)?; - Ok(diesel::debug_query::<Pg, _>(&query.limit(plan.limit).offset(plan.offset)).to_string()) + let query = build_query(plan)?.limit(plan.limit).offset(plan.offset); + let sql = super::diesel_sql(&query)?; + + let mut params = Vec::new(); + if let Some(TimeRange { start, end }) = &plan.time_range { + params.push(BindParam::timestamptz(*start)); + params.push(BindParam::timestamptz(*end)); + } + + for filter in &plan.filters { + collect_filter_params(&mut params, filter)?; + } + + super::reconcile_limit_offset_binds(&sql, &mut params, plan.limit, plan.offset)?; + + #[cfg(any(test, debug_assertions))] + { + let bind_count = super::diesel_bind_count(&query)?; + if bind_count != params.len() { + return 
Err(ServiceError::Internal(anyhow::anyhow!( + "bind count mismatch (diesel {bind_count} vs params {})", + params.len() + ))); + } + } + + Ok((sql, params)) } fn ensure_entity(plan: &QueryPlan) -> Result<()> { @@ -118,6 +143,85 @@ fn apply_filter<'a>(mut query: TracesQuery<'a>, filter: &Filter) -> Result<Trace Ok(query) } +fn collect_text_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.op { + FilterOp::Eq | FilterOp::NotEq | FilterOp::Like | FilterOp::NotLike => { + params.push(BindParam::Text(filter.value.as_scalar()?.to_string())); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + let values = filter.value.as_list()?.to_vec(); + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::TextArray(values)); + Ok(()) + } + _ => Err(ServiceError::InvalidRequest(format!( + "unsupported operator for text filter: {:?}", + filter.op + ))), + } +} + +fn collect_i32_list(params: &mut Vec<BindParam>, filter: &Filter, err: &str) -> Result<()> { + let values: Vec<i32> = filter + .value + .as_list()? + .iter() + .map(|v| v.parse::<i32>()) + .collect::<std::result::Result<Vec<_>, _>>() + .map_err(|_| ServiceError::InvalidRequest(err.into()))?; + if values.is_empty() { + return Ok(()); + } + params.push(BindParam::IntArray( + values.into_iter().map(i64::from).collect(), + )); + Ok(()) +} + +fn collect_filter_params(params: &mut Vec<BindParam>, filter: &Filter) -> Result<()> { + match filter.field.as_str() { + "trace_id" | "span_id" | "parent_span_id" | "service_name" | "service.name" + | "service_version" | "service_instance" | "scope_name" | "scope_version" | "name" + | "span_name" | "status_message" => collect_text_params(params, filter), + "status_code" => match filter.op { + FilterOp::Eq | FilterOp::NotEq => { + let value = filter.value.as_scalar()?.parse::<i32>().map_err(|_| { + ServiceError::InvalidRequest("status_code must be an integer".into()) + })?; + params.push(BindParam::Int(i64::from(value))); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + collect_i32_list(params, filter, "status_code list must be integers") + } + _ => Err(ServiceError::InvalidRequest( + "status_code filter only supports equality or list comparisons".into(), + )), + }, + "kind" | "span_kind" => match filter.op { + FilterOp::Eq | FilterOp::NotEq => { + let value = filter.value.as_scalar()?.parse::<i32>().map_err(|_| { + ServiceError::InvalidRequest("span kind must be an integer".into()) + })?; + params.push(BindParam::Int(i64::from(value))); + Ok(()) + } + FilterOp::In | FilterOp::NotIn => { + collect_i32_list(params, filter, "span kind list must be integers") + } + _ => Err(ServiceError::InvalidRequest( + "kind filter only supports equality comparisons".into(), + )), + }, + other => Err(ServiceError::InvalidRequest(format!( + "unsupported filter field for traces: '{other}'" + ))), + } +} + fn apply_status_code_filter<'a>( mut query: TracesQuery<'a>, filter: &Filter, @@ -279,6 +383,7 @@ mod tests { offset: 0, time_range: Some(TimeRange { start, end }), stats: None, + downsample: None, }; let result = build_query(&plan); diff --git a/rust/srql/src/query/viz.rs b/rust/srql/src/query/viz.rs new file mode 100644 index 000000000..939f0557b --- /dev/null +++ b/rust/srql/src/query/viz.rs @@ -0,0 +1,605 @@ +use crate::parser::Entity; +use serde::Serialize; + +use super::QueryPlan; + +#[derive(Debug, Clone, Serialize)] +pub struct VizMeta { + pub columns: Vec<ColumnMeta>, + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub suggestions: Vec<VizSuggestion>, +} + 
+#[derive(Debug, Clone, Serialize)] +pub struct ColumnMeta { + pub name: String, + #[serde(rename = "type")] + pub col_type: ColumnType, + #[serde(skip_serializing_if = "Option::is_none")] + pub semantic: Option<ColumnSemantic>, + #[serde(skip_serializing_if = "Option::is_none")] + pub unit: Option<String>, +} + +#[derive(Debug, Clone, Copy, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum ColumnType { + Text, + TextArray, + Bool, + Int, + IntArray, + Float, + Timestamptz, + Jsonb, +} + +#[derive(Debug, Clone, Copy, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum ColumnSemantic { + Id, + Time, + Value, + Label, + Series, +} + +#[derive(Debug, Clone, Serialize)] +pub struct VizSuggestion { + pub kind: VizKind, + #[serde(skip_serializing_if = "Option::is_none")] + pub x: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub y: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub series: Option<String>, +} + +#[derive(Debug, Clone, Copy, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum VizKind { + Timeseries, + Table, +} + +pub fn meta_for_plan(plan: &QueryPlan) -> Option<VizMeta> { + if plan.downsample.is_some() + && matches!( + plan.entity, + Entity::TimeseriesMetrics + | Entity::SnmpMetrics + | Entity::RperfMetrics + | Entity::CpuMetrics + | Entity::MemoryMetrics + | Entity::DiskMetrics + | Entity::ProcessMetrics + ) + { + return Some(VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("series", ColumnType::Text, Some(ColumnSemantic::Series)), + col("value", ColumnType::Float, Some(ColumnSemantic::Value)), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Timeseries, + x: Some("timestamp".to_string()), + y: Some("value".to_string()), + series: Some("series".to_string()), + }], + }); + } + + Some(match plan.entity { + Entity::Devices => VizMeta { + columns: vec![ + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("hostname", ColumnType::Text, Some(ColumnSemantic::Label)), + col("ip", ColumnType::Text, None), + col("mac", ColumnType::Text, None), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("discovery_sources", ColumnType::TextArray, None), + col("is_available", ColumnType::Bool, None), + col( + "first_seen", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col( + "last_seen", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col( + "last_heartbeat", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("device_type", ColumnType::Text, None), + col("service_type", ColumnType::Text, None), + col("service_status", ColumnType::Text, None), + col("metadata", ColumnType::Jsonb, None), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::Pollers => VizMeta { + columns: vec![ + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("status", ColumnType::Text, None), + col("spiffe_identity", ColumnType::Text, None), + col( + "first_registered", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col( + "first_seen", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col( + "last_seen", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("is_healthy", ColumnType::Bool, None), + col("agent_count", ColumnType::Int, None), + col("checker_count", ColumnType::Int, None), + col("metadata", 
ColumnType::Jsonb, None), + col( + "updated_at", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::Services => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col( + "service_name", + ColumnType::Text, + Some(ColumnSemantic::Label), + ), + col("service_type", ColumnType::Text, None), + col("available", ColumnType::Bool, None), + col("message", ColumnType::Text, None), + col("details", ColumnType::Text, None), + col("partition", ColumnType::Text, None), + col( + "created_at", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::Interfaces => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("device_ip", ColumnType::Text, None), + col("if_index", ColumnType::Int, None), + col("if_name", ColumnType::Text, Some(ColumnSemantic::Label)), + col("if_descr", ColumnType::Text, None), + col("if_alias", ColumnType::Text, None), + col("if_speed", ColumnType::Int, None), + col("if_phys_address", ColumnType::Text, None), + col("ip_addresses", ColumnType::TextArray, None), + col("if_admin_status", ColumnType::Int, None), + col("if_oper_status", ColumnType::Int, None), + col("metadata", ColumnType::Jsonb, None), + col( + "created_at", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::Events => VizMeta { + columns: vec![ + col( + "event_timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("event_type", ColumnType::Text, None), + col("source", ColumnType::Text, None), + col("subject", ColumnType::Text, None), + col("severity", ColumnType::Text, None), + col("short_message", ColumnType::Text, None), + col( + "created_at", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::Logs => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("trace_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("span_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("severity_text", ColumnType::Text, None), + col("severity_number", ColumnType::Int, None), + col("body", ColumnType::Text, None), + col("service_name", ColumnType::Text, None), + col( + "created_at", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::Traces => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("trace_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("span_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("parent_span_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("name", ColumnType::Text, Some(ColumnSemantic::Label)), + 
col("start_time_unix_nano", ColumnType::Int, None), + col("end_time_unix_nano", ColumnType::Int, None), + col("service_name", ColumnType::Text, None), + col("status_code", ColumnType::Int, None), + col( + "created_at", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::TraceSummaries => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("trace_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("root_span_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col( + "root_span_name", + ColumnType::Text, + Some(ColumnSemantic::Label), + ), + col( + "root_service_name", + ColumnType::Text, + Some(ColumnSemantic::Label), + ), + col("root_span_kind", ColumnType::Int, None), + col( + "duration_ms", + ColumnType::Float, + Some(ColumnSemantic::Value), + ) + .with_unit("ms"), + col("service_set", ColumnType::TextArray, None), + col("span_count", ColumnType::Int, Some(ColumnSemantic::Value)), + col("error_count", ColumnType::Int, Some(ColumnSemantic::Value)), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::OtelMetrics => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("trace_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("span_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("service_name", ColumnType::Text, None), + col("span_name", ColumnType::Text, Some(ColumnSemantic::Label)), + col( + "duration_ms", + ColumnType::Float, + Some(ColumnSemantic::Value), + ) + .with_unit("ms"), + col("metric_type", ColumnType::Text, None), + col("http_method", ColumnType::Text, None), + col("http_route", ColumnType::Text, None), + col("http_status_code", ColumnType::Text, None), + col("grpc_service", ColumnType::Text, None), + col("grpc_method", ColumnType::Text, None), + col("grpc_status_code", ColumnType::Text, None), + col("is_slow", ColumnType::Bool, None), + col("component", ColumnType::Text, None), + col("level", ColumnType::Text, None), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::TimeseriesMetrics | Entity::SnmpMetrics | Entity::RperfMetrics => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col( + "metric_name", + ColumnType::Text, + Some(ColumnSemantic::Series), + ), + col("metric_type", ColumnType::Text, None), + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("value", ColumnType::Float, Some(ColumnSemantic::Value)), + col("unit", ColumnType::Text, None), + col("tags", ColumnType::Jsonb, None), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Timeseries, + x: Some("timestamp".to_string()), + y: Some("value".to_string()), + series: Some("metric_name".to_string()), + }], + }, + Entity::CpuMetrics => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("host_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("core_id", ColumnType::Int, None), + col( + 
"usage_percent", + ColumnType::Float, + Some(ColumnSemantic::Value), + ) + .with_unit("percent"), + col( + "frequency_hz", + ColumnType::Float, + Some(ColumnSemantic::Value), + ) + .with_unit("hz"), + col("label", ColumnType::Text, Some(ColumnSemantic::Label)), + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Timeseries, + x: Some("timestamp".to_string()), + y: Some("usage_percent".to_string()), + series: Some("label".to_string()), + }], + }, + Entity::MemoryMetrics => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("host_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col( + "usage_percent", + ColumnType::Float, + Some(ColumnSemantic::Value), + ) + .with_unit("percent"), + col("used_bytes", ColumnType::Int, Some(ColumnSemantic::Value)).with_unit("bytes"), + col( + "available_bytes", + ColumnType::Int, + Some(ColumnSemantic::Value), + ) + .with_unit("bytes"), + col("total_bytes", ColumnType::Int, Some(ColumnSemantic::Value)).with_unit("bytes"), + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Timeseries, + x: Some("timestamp".to_string()), + y: Some("usage_percent".to_string()), + series: None, + }], + }, + Entity::DiskMetrics => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("host_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("mount_point", ColumnType::Text, Some(ColumnSemantic::Label)), + col( + "usage_percent", + ColumnType::Float, + Some(ColumnSemantic::Value), + ) + .with_unit("percent"), + col("used_bytes", ColumnType::Int, Some(ColumnSemantic::Value)).with_unit("bytes"), + col( + "available_bytes", + ColumnType::Int, + Some(ColumnSemantic::Value), + ) + .with_unit("bytes"), + col("total_bytes", ColumnType::Int, Some(ColumnSemantic::Value)).with_unit("bytes"), + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Timeseries, + x: Some("timestamp".to_string()), + y: Some("usage_percent".to_string()), + series: Some("mount_point".to_string()), + }], + }, + Entity::ProcessMetrics => VizMeta { + columns: vec![ + col( + "timestamp", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("host_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("pid", ColumnType::Int, Some(ColumnSemantic::Id)), + col("name", ColumnType::Text, Some(ColumnSemantic::Label)), + col("cpu_usage", ColumnType::Float, Some(ColumnSemantic::Value)) + .with_unit("percent"), + col("memory_usage", ColumnType::Int, Some(ColumnSemantic::Value)) + .with_unit("bytes"), + col("status", ColumnType::Text, None), + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Timeseries, + x: Some("timestamp".to_string()), + y: Some("cpu_usage".to_string()), + series: Some("name".to_string()), + }], + }, + Entity::DeviceUpdates => VizMeta { + columns: vec![ + col( + "observed_at", + ColumnType::Timestamptz, + 
Some(ColumnSemantic::Time), + ), + col("device_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("agent_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("poller_id", ColumnType::Text, Some(ColumnSemantic::Id)), + col("discovery_source", ColumnType::Text, None), + col("ip", ColumnType::Text, None), + col("mac", ColumnType::Text, None), + col("hostname", ColumnType::Text, None), + col("available", ColumnType::Bool, None), + col("metadata", ColumnType::Jsonb, None), + col( + "created_at", + ColumnType::Timestamptz, + Some(ColumnSemantic::Time), + ), + ], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::DeviceGraph => VizMeta { + columns: vec![col("result", ColumnType::Jsonb, None)], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + Entity::GraphCypher => VizMeta { + columns: vec![col("result", ColumnType::Jsonb, None)], + suggestions: vec![VizSuggestion { + kind: VizKind::Table, + x: None, + y: None, + series: None, + }], + }, + }) +} + +fn col(name: &str, col_type: ColumnType, semantic: Option<ColumnSemantic>) -> ColumnMeta { + ColumnMeta { + name: name.to_string(), + col_type, + semantic, + unit: None, + } +} + +impl ColumnMeta { + fn with_unit(mut self, unit: &str) -> Self { + self.unit = Some(unit.to_string()); + self + } +} diff --git a/rust/srql/src/schema.rs b/rust/srql/src/schema.rs index 7a26c2a59..680cf44a4 100644 --- a/rust/srql/src/schema.rs +++ b/rust/srql/src/schema.rs @@ -180,6 +180,7 @@ diesel::table! { is_slow -> Nullable<Bool>, component -> Nullable<Text>, level -> Nullable<Text>, + unit -> Nullable<Text>, created_at -> Timestamptz, } } diff --git a/rust/srql/tests/support/harness.rs b/rust/srql/tests/support/harness.rs index 5add1eda7..13004060d 100644 --- a/rust/srql/tests/support/harness.rs +++ b/rust/srql/tests/support/harness.rs @@ -100,6 +100,8 @@ fn test_config(database_url: String) -> AppConfig { database_url, max_pool_size: 5, pg_ssl_root_cert: None, + pg_ssl_cert: None, + pg_ssl_key: None, api_key: Some(API_KEY.to_string()), api_key_kv_key: None, allowed_origins: None, diff --git a/web-ng/.formatter.exs b/web-ng/.formatter.exs new file mode 100644 index 000000000..ef8840ce6 --- /dev/null +++ b/web-ng/.formatter.exs @@ -0,0 +1,6 @@ +[ + import_deps: [:ecto, :ecto_sql, :phoenix], + subdirectories: ["priv/*/migrations"], + plugins: [Phoenix.LiveView.HTMLFormatter], + inputs: ["*.{heex,ex,exs}", "{config,lib,test}/**/*.{heex,ex,exs}", "priv/*/seeds.exs"] +] diff --git a/web-ng/.gitignore b/web-ng/.gitignore new file mode 100644 index 000000000..70cd403ed --- /dev/null +++ b/web-ng/.gitignore @@ -0,0 +1,37 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where 3rd-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Temporary files, for example, from tests. +/tmp/ + +# Ignore package tarball (built via "mix hex.build"). +serviceradar_web_ng-*.tar + +# Ignore assets that are produced by build tools. +/priv/static/assets/ + +# Ignore digested assets cache. 
+/priv/static/cache_manifest.json + +# In case you use Node.js/npm, you want to ignore these. +npm-debug.log +/assets/node_modules/ + diff --git a/web-ng/AGENTS.md b/web-ng/AGENTS.md new file mode 100644 index 000000000..977dff73a --- /dev/null +++ b/web-ng/AGENTS.md @@ -0,0 +1,510 @@ +This is a web application written using the Phoenix web framework. + +## Project guidelines + +- Use `mix precommit` alias when you are done with all changes and fix any pending issues +- Use the already included and available `:req` (`Req`) library for HTTP requests, **avoid** `:httpoison`, `:tesla`, and `:httpc`. Req is included by default and is the preferred HTTP client for Phoenix apps + +### Phoenix v1.8 guidelines + +- **Always** begin your LiveView templates with `<Layouts.app flash={@flash} ...>` which wraps all inner content +- The `MyAppWeb.Layouts` module is aliased in the `my_app_web.ex` file, so you can use it without needing to alias it again +- Anytime you run into errors with no `current_scope` assign: + - You failed to follow the Authenticated Routes guidelines, or you failed to pass `current_scope` to `<Layouts.app>` + - **Always** fix the `current_scope` error by moving your routes to the proper `live_session` and ensure you pass `current_scope` as needed +- Phoenix v1.8 moved the `<.flash_group>` component to the `Layouts` module. You are **forbidden** from calling `<.flash_group>` outside of the `layouts.ex` module +- Out of the box, `core_components.ex` imports an `<.icon name="hero-x-mark" class="w-5 h-5"/>` component for hero icons. **Always** use the `<.icon>` component for icons, **never** use `Heroicons` modules or similar +- **Always** use the imported `<.input>` component for form inputs from `core_components.ex` when available. `<.input>` is imported and using it will save steps and prevent errors +- If you override the default input classes (`<.input class="myclass px-2 py-1 rounded-lg">`) with your own values, no default classes are inherited, so your +custom classes must fully style the input + +### JS and CSS guidelines + +- **Use Tailwind CSS classes and custom CSS rules** to create polished, responsive, and visually stunning interfaces. 
+- Tailwindcss v4 **no longer needs a tailwind.config.js** and uses a new import syntax in `app.css`: + + @import "tailwindcss" source(none); + @source "../css"; + @source "../js"; + @source "../../lib/my_app_web"; + +- **Always use and maintain this import syntax** in the app.css file for projects generated with `phx.new` +- **Never** use `@apply` when writing raw css +- **Always** manually write your own tailwind-based components instead of using daisyUI for a unique, world-class design +- Out of the box **only the app.js and app.css bundles are supported** + - You cannot reference an external vendor'd script `src` or link `href` in the layouts + - You must import the vendor deps into app.js and app.css to use them + - **Never write inline <script>custom js</script> tags within templates** + +### UI/UX & design guidelines + +- **Produce world-class UI designs** with a focus on usability, aesthetics, and modern design principles +- Implement **subtle micro-interactions** (e.g., button hover effects, and smooth transitions) +- Ensure **clean typography, spacing, and layout balance** for a refined, premium look +- Focus on **delightful details** like hover effects, loading states, and smooth page transitions + + +<!-- phoenix-gen-auth-start --> +## Authentication + +- **Always** handle authentication flow at the router level with proper redirects +- **Always** be mindful of where to place routes. `phx.gen.auth` creates multiple router plugs and `live_session` scopes: + - A plug `:fetch_current_scope_for_user` that is included in the default browser pipeline + - A plug `:require_authenticated_user` that redirects to the log in page when the user is not authenticated + - A `live_session :current_user` scope - for routes that need the current user but don't require authentication, similar to `:fetch_current_scope_for_user` + - A `live_session :require_authenticated_user` scope - for routes that require authentication, similar to the plug with the same name + - In both cases, a `@current_scope` is assigned to the Plug connection and LiveView socket + - A plug `redirect_if_user_is_authenticated` that redirects to a default path in case the user is authenticated - useful for a registration page that should only be shown to unauthenticated users +- **Always let the user know in which router scopes, `live_session`, and pipeline you are placing the route, AND SAY WHY** +- `phx.gen.auth` assigns the `current_scope` assign - it **does not assign a `current_user` assign** +- Always pass the assign `current_scope` to context modules as first argument. When performing queries, use `current_scope.user` to filter the query results +- To derive/access `current_user` in templates, **always use the `@current_scope.user`**, never use **`@current_user`** in templates or LiveViews +- **Never** duplicate `live_session` names. 
A `live_session :current_user` can only be defined __once__ in the router, so all routes for the `live_session :current_user` must be grouped in a single block +- Anytime you hit `current_scope` errors or the logged in session isn't displaying the right content, **always double check the router and ensure you are using the correct plug and `live_session` as described below** + +### Routes that require authentication + +LiveViews that require login should **always be placed inside the __existing__ `live_session :require_authenticated_user` block**: + + scope "/", AppWeb do + pipe_through [:browser, :require_authenticated_user] + + live_session :require_authenticated_user, + on_mount: [{ServiceRadarWebNGWeb.UserAuth, :require_authenticated}] do + # phx.gen.auth generated routes + live "/users/settings", UserLive.Settings, :edit + live "/users/settings/confirm-email/:token", UserLive.Settings, :confirm_email + # our own routes that require logged in user + live "/", MyLiveThatRequiresAuth, :index + end + end + +Controller routes must be placed in a scope that sets the `:require_authenticated_user` plug: + + scope "/", AppWeb do + pipe_through [:browser, :require_authenticated_user] + + get "/", MyControllerThatRequiresAuth, :index + end + +### Routes that work with or without authentication + +LiveViews that can work with or without authentication, **always use the __existing__ `:current_user` scope**, ie: + + scope "/", MyAppWeb do + pipe_through [:browser] + + live_session :current_user, + on_mount: [{ServiceRadarWebNGWeb.UserAuth, :mount_current_scope}] do + # our own routes that work with or without authentication + live "/", PublicLive + end + end + +Controllers automatically have the `current_scope` available if they use the `:browser` pipeline. + +<!-- phoenix-gen-auth-end --> + +<!-- usage-rules-start --> + +<!-- phoenix:elixir-start --> +## Elixir guidelines + +- Elixir lists **do not support index based access via the access syntax** + + **Never do this (invalid)**: + + i = 0 + mylist = ["blue", "green"] + mylist[i] + + Instead, **always** use `Enum.at`, pattern matching, or `List` for index based list access, ie: + + i = 0 + mylist = ["blue", "green"] + Enum.at(mylist, i) + +- Elixir variables are immutable, but can be rebound, so for block expressions like `if`, `case`, `cond`, etc + you *must* bind the result of the expression to a variable if you want to use it and you CANNOT rebind the result inside the expression, ie: + + # INVALID: we are rebinding inside the `if` and the result never gets assigned + if connected?(socket) do + socket = assign(socket, :val, val) + end + + # VALID: we rebind the result of the `if` to a new variable + socket = + if connected?(socket) do + assign(socket, :val, val) + end + +- **Never** nest multiple modules in the same file as it can cause cyclic dependencies and compilation errors +- **Never** use map access syntax (`changeset[:field]`) on structs as they do not implement the Access behaviour by default. For regular structs, you **must** access the fields directly, such as `my_struct.field` or use higher level APIs that are available on the struct if they exist, `Ecto.Changeset.get_field/2` for changesets +- Elixir's standard library has everything necessary for date and time manipulation. Familiarize yourself with the common `Time`, `Date`, `DateTime`, and `Calendar` interfaces by accessing their documentation as necessary. 
**Never** install additional dependencies unless asked or for date/time parsing (for which you can use the `date_time_parser` package) +- Don't use `String.to_atom/1` on user input (memory leak risk) +- Predicate function names should not start with `is_` and should end in a question mark. Names like `is_thing` should be reserved for guards +- Elixir's builtin OTP primitives like `DynamicSupervisor` and `Registry` require names in the child spec, such as `{DynamicSupervisor, name: MyApp.MyDynamicSup}`, then you can use `DynamicSupervisor.start_child(MyApp.MyDynamicSup, child_spec)` +- Use `Task.async_stream(collection, callback, options)` for concurrent enumeration with back-pressure. The majority of times you will want to pass `timeout: :infinity` as an option + +## Mix guidelines + +- Read the docs and options before using tasks (by using `mix help task_name`) +- To debug test failures, run tests in a specific file with `mix test test/my_test.exs` or run all previously failed tests with `mix test --failed` +- `mix deps.clean --all` is **almost never needed**. **Avoid** using it unless you have good reason + +## Test guidelines + +- **Always use `start_supervised!/1`** to start processes in tests as it guarantees cleanup between tests +- **Avoid** `Process.sleep/1` and `Process.alive?/1` in tests + - Instead of sleeping to wait for a process to finish, **always** use `Process.monitor/1` and assert on the DOWN message: + + ref = Process.monitor(pid) + assert_receive {:DOWN, ^ref, :process, ^pid, :normal} + + - Instead of sleeping to synchronize before the next call, **always** use `_ = :sys.get_state/1` to ensure the process has handled prior messages +<!-- phoenix:elixir-end --> + +<!-- phoenix:phoenix-start --> +## Phoenix guidelines + +- Remember Phoenix router `scope` blocks include an optional alias which is prefixed for all routes within the scope. **Always** be mindful of this when creating routes within a scope to avoid duplicate module prefixes. + +- You **never** need to create your own `alias` for route definitions! The `scope` provides the alias, ie: + + scope "/admin", AppWeb.Admin do + pipe_through :browser + + live "/users", UserLive, :index + end + + the UserLive route would point to the `AppWeb.Admin.UserLive` module + +- `Phoenix.View` is no longer needed or included with Phoenix, don't use it +<!-- phoenix:phoenix-end --> + +<!-- phoenix:ecto-start --> +## Ecto Guidelines + +- **Always** preload Ecto associations in queries when they'll be accessed in templates, ie a message that needs to reference the `message.user.email` +- Remember `import Ecto.Query` and other supporting modules when you write `seeds.exs` +- `Ecto.Schema` fields always use the `:string` type, even for `:text` columns, ie: `field :name, :string` +- `Ecto.Changeset.validate_number/2` **DOES NOT SUPPORT the `:allow_nil` option**. By default, Ecto validations only run if a change for the given field exists and the change value is not nil, so such an option is never needed +- You **must** use `Ecto.Changeset.get_field(changeset, :field)` to access changeset fields +- Fields which are set programmatically, such as `user_id`, must not be listed in `cast` calls or similar for security purposes. 
Instead they must be explicitly set when creating the struct +- **Always** invoke `mix ecto.gen.migration migration_name_using_underscores` when generating migration files, so the correct timestamp and conventions are applied +<!-- phoenix:ecto-end --> + +<!-- phoenix:html-start --> +## Phoenix HTML guidelines + +- Phoenix templates **always** use `~H` or .html.heex files (known as HEEx), **never** use `~E` +- **Always** use the imported `Phoenix.Component.form/1` and `Phoenix.Component.inputs_for/1` function to build forms. **Never** use `Phoenix.HTML.form_for` or `Phoenix.HTML.inputs_for` as they are outdated +- When building forms **always** use the already imported `Phoenix.Component.to_form/2` (`assign(socket, form: to_form(...))` and `<.form for={@form} id="msg-form">`), then access those forms in the template via `@form[:field]` +- **Always** add unique DOM IDs to key elements (like forms, buttons, etc) when writing templates, these IDs can later be used in tests (`<.form for={@form} id="product-form">`) +- For "app wide" template imports, you can import/alias into the `my_app_web.ex`'s `html_helpers` block, so they will be available to all LiveViews, LiveComponent's, and all modules that do `use MyAppWeb, :html` (replace "my_app" by the actual app name) + +- Elixir supports `if/else` but **does NOT support `if/else if` or `if/elsif`**. **Never use `else if` or `elseif` in Elixir**, **always** use `cond` or `case` for multiple conditionals. + + **Never do this (invalid)**: + + <%= if condition do %> + ... + <% else if other_condition %> + ... + <% end %> + + Instead **always** do this: + + <%= cond do %> + <% condition -> %> + ... + <% condition2 -> %> + ... + <% true -> %> + ... + <% end %> + +- HEEx require special tag annotation if you want to insert literal curly's like `{` or `}`. If you want to show a textual code snippet on the page in a `<pre>` or `<code>` block you *must* annotate the parent tag with `phx-no-curly-interpolation`: + + <code phx-no-curly-interpolation> + let obj = {key: "val"} + </code> + + Within `phx-no-curly-interpolation` annotated tags, you can use `{` and `}` without escaping them, and dynamic Elixir expressions can still be used with `<%= ... %>` syntax + +- HEEx class attrs support lists, but you must **always** use list `[...]` syntax. You can use the class list syntax to conditionally add classes, **always do this for multiple class values**: + + <a class={[ + "px-2 text-white", + @some_flag && "py-5", + if(@other_condition, do: "border-red-500", else: "border-blue-100"), + ... + ]}>Text</a> + + and **always** wrap `if`'s inside `{...}` expressions with parens, like done above (`if(@other_condition, do: "...", else: "...")`) + + and **never** do this, since it's invalid (note the missing `[` and `]`): + + <a class={ + "px-2 text-white", + @some_flag && "py-5" + }> ... + => Raises compile syntax error on invalid HEEx attr syntax + +- **Never** use `<% Enum.each %>` or non-for comprehensions for generating template content, instead **always** use `<%= for item <- @collection do %>` +- HEEx HTML comments use `<%!-- comment --%>`. **Always** use the HEEx HTML comment syntax for template comments (`<%!-- comment --%>`) +- HEEx allows interpolation via `{...}` and `<%= ... %>`, but the `<%= %>` **only** works within tag bodies. **Always** use the `{...}` syntax for interpolation within tag attributes, and for interpolation of values within tag bodies. **Always** interpolate block constructs (if, cond, case, for) within tag bodies using `<%= ... %>`. 
+ + **Always** do this: + + <div id={@id}> + {@my_assign} + <%= if @some_block_condition do %> + {@another_assign} + <% end %> + </div> + + and **Never** do this – the program will terminate with a syntax error: + + <%!-- THIS IS INVALID NEVER EVER DO THIS --%> + <div id="<%= @invalid_interpolation %>"> + {if @invalid_block_construct do} + {end} + </div> +<!-- phoenix:html-end --> + +<!-- phoenix:liveview-start --> +## Phoenix LiveView guidelines + +- **Never** use the deprecated `live_redirect` and `live_patch` functions, instead **always** use the `<.link navigate={href}>` and `<.link patch={href}>` in templates, and the `push_navigate` and `push_patch` functions in LiveViews +- **Avoid LiveComponents** unless you have a strong, specific need for them +- LiveViews should be named like `AppWeb.WeatherLive`, with a `Live` suffix. When you go to add LiveView routes to the router, the default `:browser` scope is **already aliased** with the `AppWeb` module, so you can just do `live "/weather", WeatherLive` + +### LiveView streams + +- **Always** use LiveView streams for collections instead of assigning regular lists, to avoid memory ballooning and runtime termination, with the following operations: + - basic append of N items - `stream(socket, :messages, [new_msg])` + - resetting stream with new items - `stream(socket, :messages, [new_msg], reset: true)` (e.g. for filtering items) + - prepend to stream - `stream(socket, :messages, [new_msg], at: 0)` + - deleting items - `stream_delete(socket, :messages, msg)` + +- When using the `stream/3` interfaces in the LiveView, the LiveView template must 1) always set `phx-update="stream"` on the parent element, with a DOM id on the parent element like `id="messages"` and 2) consume the `@streams.stream_name` collection and use the id as the DOM id for each child. For a call like `stream(socket, :messages, [new_msg])` in the LiveView, the template would be: + + <div id="messages" phx-update="stream"> + <div :for={{id, msg} <- @streams.messages} id={id}> + {msg.text} + </div> + </div> + +- LiveView streams are *not* enumerable, so you cannot use `Enum.filter/2` or `Enum.reject/2` on them. Instead, if you want to filter, prune, or refresh a list of items on the UI, you **must refetch the data and re-stream the entire stream collection, passing reset: true**: + + def handle_event("filter", %{"filter" => filter}, socket) do + # re-fetch the messages based on the filter + messages = list_messages(filter) + + {:noreply, + socket + |> assign(:messages_empty?, messages == []) + # reset the stream with the new messages + |> stream(:messages, messages, reset: true)} + end + +- LiveView streams *do not support counting or empty states*. If you need to display a count, you must track it using a separate assign. For empty states, you can use Tailwind classes: + + <div id="tasks" phx-update="stream"> + <div class="hidden only:block">No tasks yet</div> + <div :for={{id, task} <- @streams.tasks} id={id}> + {task.name} + </div> + </div> + + The above only works if the empty state is the only HTML block alongside the stream for-comprehension. 
+ +- When updating an assign that should change content inside any streamed item(s), you MUST re-stream the items + along with the updated assign: + + def handle_event("edit_message", %{"message_id" => message_id}, socket) do + message = Chat.get_message!(message_id) + edit_form = to_form(Chat.change_message(message, %{content: message.content})) + + # re-insert message so @editing_message_id toggle logic takes effect for that stream item + {:noreply, + socket + |> stream_insert(:messages, message) + |> assign(:editing_message_id, String.to_integer(message_id)) + |> assign(:edit_form, edit_form)} + end + + And in the template: + + <div id="messages" phx-update="stream"> + <div :for={{id, message} <- @streams.messages} id={id} class="flex group"> + {message.username} + <%= if @editing_message_id == message.id do %> + <%!-- Edit mode --%> + <.form for={@edit_form} id="edit-form-#{message.id}" phx-submit="save_edit"> + ... + </.form> + <% end %> + </div> + </div> + +- **Never** use the deprecated `phx-update="append"` or `phx-update="prepend"` for collections + +### LiveView JavaScript interop + +- Remember anytime you use `phx-hook="MyHook"` and that JS hook manages its own DOM, you **must** also set the `phx-update="ignore"` attribute +- **Always** provide an unique DOM id alongside `phx-hook` otherwise a compiler error will be raised + +LiveView hooks come in two flavors, 1) colocated js hooks for "inline" scripts defined inside HEEx, +and 2) external `phx-hook` annotations where JavaScript object literals are defined and passed to the `LiveSocket` constructor. + +#### Inline colocated js hooks + +**Never** write raw embedded `<script>` tags in heex as they are incompatible with LiveView. +Instead, **always use a colocated js hook script tag (`:type={Phoenix.LiveView.ColocatedHook}`) +when writing scripts inside the template**: + + <input type="text" name="user[phone_number]" id="user-phone-number" phx-hook=".PhoneNumber" /> + <script :type={Phoenix.LiveView.ColocatedHook} name=".PhoneNumber"> + export default { + mounted() { + this.el.addEventListener("input", e => { + let match = this.el.value.replace(/\D/g, "").match(/^(\d{3})(\d{3})(\d{4})$/) + if(match) { + this.el.value = `${match[1]}-${match[2]}-${match[3]}` + } + }) + } + } + </script> + +- colocated hooks are automatically integrated into the app.js bundle +- colocated hooks names **MUST ALWAYS** start with a `.` prefix, i.e. `.PhoneNumber` + +#### External phx-hook + +External JS hooks (`<div id="myhook" phx-hook="MyHook">`) must be placed in `assets/js/` and passed to the +LiveSocket constructor: + + const MyHook = { + mounted() { ... } + } + let liveSocket = new LiveSocket("/live", Socket, { + hooks: { MyHook } + }); + +#### Pushing events between client and server + +Use LiveView's `push_event/3` when you need to push events/data to the client for a phx-hook to handle. 
+**Always** return or rebind the socket on `push_event/3` when pushing events: + + # re-bind socket so we maintain event state to be pushed + socket = push_event(socket, "my_event", %{...}) + + # or return the modified socket directly: + def handle_event("some_event", _, socket) do + {:noreply, push_event(socket, "my_event", %{...})} + end + +Pushed events can then be picked up in a JS hook with `this.handleEvent`: + + mounted() { + this.handleEvent("my_event", data => console.log("from server:", data)); + } + +Clients can also push an event to the server and receive a reply with `this.pushEvent`: + + mounted() { + this.el.addEventListener("click", e => { + this.pushEvent("my_event", { one: 1 }, reply => console.log("got reply from server:", reply)); + }) + } + +Where the server handled it via: + + def handle_event("my_event", %{"one" => 1}, socket) do + {:reply, %{two: 2}, socket} + end + +### LiveView tests + +- Use the `Phoenix.LiveViewTest` module and `LazyHTML` (included) for making your assertions +- Form tests are driven by `Phoenix.LiveViewTest`'s `render_submit/2` and `render_change/2` functions +- Come up with a step-by-step test plan that splits major test cases into small, isolated files. You may start with simpler tests that verify content exists, then gradually add interaction tests +- **Always reference the key element IDs you added in the LiveView templates in your tests** for `Phoenix.LiveViewTest` functions like `element/2`, `has_element/2`, selectors, etc +- **Never** test against raw HTML, **always** use `element/2`, `has_element/2`, and similar: `assert has_element?(view, "#my-form")` +- Instead of relying on testing text content, which can change, favor testing for the presence of key elements +- Focus on testing outcomes rather than implementation details +- Be aware that `Phoenix.Component` functions like `<.form>` might produce different HTML than expected. Test against the output HTML structure, not your mental model of what you expect it to be +- When facing test failures with element selectors, add debug statements to print the actual HTML, but use `LazyHTML` selectors to limit the output, ie: + + html = render(view) + document = LazyHTML.from_fragment(html) + matches = LazyHTML.filter(document, "your-complex-selector") + IO.inspect(matches, label: "Matches") + +### Form handling + +#### Creating a form from params + +If you want to create a form based on `handle_event` params: + + def handle_event("submitted", params, socket) do + {:noreply, assign(socket, form: to_form(params))} + end + +When you pass a map to `to_form/1`, it assumes said map contains the form params, which are expected to have string keys. + +You can also specify a name to nest the params: + + def handle_event("submitted", %{"user" => user_params}, socket) do + {:noreply, assign(socket, form: to_form(user_params, as: :user))} + end + +#### Creating a form from changesets + +When using changesets, the underlying data, form params, and errors are retrieved from the changeset. The `:as` option is automatically computed too. E.g. if you have a user schema: + + defmodule MyApp.Users.User do + use Ecto.Schema + ... + end + +And then you create a changeset that you pass to `to_form`: + + %MyApp.Users.User{} + |> Ecto.Changeset.change() + |> to_form() + +Once the form is submitted, the params will be available under `%{"user" => user_params}`. 
+ +In the template, the form assign can be passed to the `<.form>` function component: + + <.form for={@form} id="todo-form" phx-change="validate" phx-submit="save"> + <.input field={@form[:field]} type="text" /> + </.form> + +Always give the form an explicit, unique DOM ID, like `id="todo-form"`. + +#### Avoiding form errors + +**Always** use a form assigned via `to_form/2` in the LiveView, and the `<.input>` component in the template. In the template **always access forms like this**: + + <%!-- ALWAYS do this (valid) --%> + <.form for={@form} id="my-form"> + <.input field={@form[:field]} type="text" /> + </.form> + +And **never** do this: + + <%!-- NEVER do this (invalid) --%> + <.form for={@changeset} id="my-form"> + <.input field={@changeset[:field]} type="text" /> + </.form> + +- You are FORBIDDEN from accessing the changeset in the template as it will cause errors +- **Never** use `<.form let={f} ...>` in the template, instead **always use `<.form for={@form} ...>`**, then drive all form references from the form assign as in `@form[:field]`. The UI should **always** be driven by a `to_form/2` assigned in the LiveView module that is derived from a changeset +<!-- phoenix:liveview-end --> + +<!-- usage-rules-end --> \ No newline at end of file diff --git a/web-ng/README.md b/web-ng/README.md new file mode 100644 index 000000000..23763dc31 --- /dev/null +++ b/web-ng/README.md @@ -0,0 +1,56 @@ +# ServiceRadarWebNG + +To start your Phoenix server: + +* Run `mix setup` to install and set up dependencies +* Start Phoenix endpoint with `mix phx.server` or inside IEx with `iex -S mix phx.server` + +Now you can visit [`localhost:4000`](http://localhost:4000) from your browser. + +## Connecting to the existing CNPG (Docker/K8s) + +This app is intended to run against the existing ServiceRadar CNPG/AGE database (TimescaleDB + Apache AGE). + +### Docker (CNPG running on the docker host) + +The repo’s `docker-compose.yml` publishes CNPG on `${CNPG_PUBLIC_BIND:-127.0.0.1}:${CNPG_PUBLIC_PORT:-5455}` on the docker host. + +1. On the docker host, ensure CNPG’s server cert allows IP-based clients (example IP: `192.168.2.134`): + - Set `CNPG_CERT_EXTRA_IPS=192.168.2.134` and (re)run `docker compose up cert-generator` so `cnpg.pem` includes that SAN. + - Set `CNPG_PUBLIC_BIND=0.0.0.0` (or a specific LAN interface IP) so the published port is reachable from your workstation (put it in `.env` or export it before running compose). + - Restart the `cnpg` container after regenerating `cnpg.pem`. + +2. Export client certs from the docker host and copy them to your workstation (keep them out of the repo): + - Find the cert volume: `docker volume ls | rg 'cert-data'` + - Copy out `root.pem`, `workstation.pem`, `workstation-key.pem` from that volume. + +3. On your workstation, point `web-ng/` at CNPG: + +```bash +cd web-ng +export CNPG_HOST=192.168.2.134 +export CNPG_PORT=5455 +export CNPG_DATABASE=serviceradar +export CNPG_USERNAME=serviceradar +export CNPG_PASSWORD=serviceradar +export CNPG_SSL_MODE=verify-full +export CNPG_CERT_DIR=/path/to/private/serviceradar-certs +mix graph.ready +mix phx.server +``` + +If you are using `CNPG_SSL_MODE=verify-full` with an IP `CNPG_HOST`, the CNPG server cert must include that IP in its SAN (use `CNPG_CERT_EXTRA_IPS` as above). + +### Kubernetes + +Use `kubectl port-forward` to expose Postgres locally, then set `CNPG_HOST=localhost` and `CNPG_PORT=<forwarded-port>` (and the same TLS env vars if your cluster requires them). + +Ready to run in production? 
Please [check our deployment guides](https://hexdocs.pm/phoenix/deployment.html). + +## Learn more + +* Official website: https://www.phoenixframework.org/ +* Guides: https://hexdocs.pm/phoenix/overview.html +* Docs: https://hexdocs.pm/phoenix +* Forum: https://elixirforum.com/c/phoenix-forum +* Source: https://github.com/phoenixframework/phoenix diff --git a/web-ng/assets/css/app.css b/web-ng/assets/css/app.css new file mode 100644 index 000000000..db2b022eb --- /dev/null +++ b/web-ng/assets/css/app.css @@ -0,0 +1,105 @@ +/* See the Tailwind configuration guide for advanced usage + https://tailwindcss.com/docs/configuration */ + +@import "tailwindcss" source(none); +@source "../css"; +@source "../js"; +@source "../../lib/serviceradar_web_ng_web"; + +/* A Tailwind plugin that makes "hero-#{ICON}" classes available. + The heroicons installation itself is managed by your mix.exs */ +@plugin "../vendor/heroicons"; + +/* daisyUI Tailwind Plugin. You can update this file by fetching the latest version with: + curl -sLO https://github.com/saadeghi/daisyui/releases/latest/download/daisyui.js + Make sure to look at the daisyUI changelog: https://daisyui.com/docs/changelog/ */ +@plugin "../vendor/daisyui" { + themes: false; +} + +/* daisyUI theme plugin. You can update this file by fetching the latest version with: + curl -sLO https://github.com/saadeghi/daisyui/releases/latest/download/daisyui-theme.js + We ship with two themes, a light one inspired on Phoenix colors and a dark one inspired + on Elixir colors. Build your own at: https://daisyui.com/theme-generator/ */ +@plugin "../vendor/daisyui-theme" { + name: "dark"; + default: false; + prefersdark: true; + color-scheme: "dark"; + --color-base-100: #282a36; + --color-base-200: #21222c; + --color-base-300: #191a21; + --color-base-content: #f8f8f2; + --color-primary: #bd93f9; + --color-primary-content: #282a36; + --color-secondary: #ff79c6; + --color-secondary-content: #282a36; + --color-accent: #8be9fd; + --color-accent-content: #282a36; + --color-neutral: #44475a; + --color-neutral-content: #f8f8f2; + --color-info: #8be9fd; + --color-info-content: #282a36; + --color-success: #50fa7b; + --color-success-content: #282a36; + --color-warning: #ffb86c; + --color-warning-content: #282a36; + --color-error: #ff5555; + --color-error-content: #f8f8f2; + --radius-selector: 0.25rem; + --radius-field: 0.25rem; + --radius-box: 0.5rem; + --size-selector: 0.21875rem; + --size-field: 0.21875rem; + --border: 1px; + --depth: 1; + --noise: 0; +} + +@plugin "../vendor/daisyui-theme" { + name: "light"; + default: true; + prefersdark: false; + color-scheme: "light"; + --color-base-100: oklch(98% 0 0); + --color-base-200: oklch(96% 0.001 286.375); + --color-base-300: oklch(92% 0.004 286.32); + --color-base-content: oklch(21% 0.006 285.885); + --color-primary: oklch(70% 0.213 47.604); + --color-primary-content: oklch(98% 0.016 73.684); + --color-secondary: oklch(55% 0.027 264.364); + --color-secondary-content: oklch(98% 0.002 247.839); + --color-accent: oklch(0% 0 0); + --color-accent-content: oklch(100% 0 0); + --color-neutral: oklch(44% 0.017 285.786); + --color-neutral-content: oklch(98% 0 0); + --color-info: oklch(62% 0.214 259.815); + --color-info-content: oklch(97% 0.014 254.604); + --color-success: oklch(70% 0.14 182.503); + --color-success-content: oklch(98% 0.014 180.72); + --color-warning: oklch(66% 0.179 58.318); + --color-warning-content: oklch(98% 0.022 95.277); + --color-error: oklch(58% 0.253 17.585); + --color-error-content: oklch(96% 0.015 12.422); + 
--radius-selector: 0.25rem; + --radius-field: 0.25rem; + --radius-box: 0.5rem; + --size-selector: 0.21875rem; + --size-field: 0.21875rem; + --border: 1.5px; + --depth: 1; + --noise: 0; +} + +/* Add variants based on LiveView classes */ +@custom-variant phx-click-loading (.phx-click-loading&, .phx-click-loading &); +@custom-variant phx-submit-loading (.phx-submit-loading&, .phx-submit-loading &); +@custom-variant phx-change-loading (.phx-change-loading&, .phx-change-loading &); + +/* Use the data attribute for dark mode */ +@custom-variant dark (&:where([data-theme=dark], [data-theme=dark] *)); + +/* Make LiveView wrapper divs transparent for layout */ +[data-phx-session], [data-phx-teleported-src] { display: contents } + +/* This file is for your main application CSS */ diff --git a/web-ng/assets/js/app.js b/web-ng/assets/js/app.js new file mode 100644 index 000000000..696ecc6b2 --- /dev/null +++ b/web-ng/assets/js/app.js @@ -0,0 +1,142 @@ +// If you want to use Phoenix channels, run `mix help phx.gen.channel` +// to get started and then uncomment the line below. +// import "./user_socket.js" + +// You can include dependencies in two ways. +// +// The simplest option is to put them in assets/vendor and +// import them using relative paths: +// +// import "../vendor/some-package.js" +// +// Alternatively, you can `npm install some-package --prefix assets` and import +// them using a path starting with the package name: +// +// import "some-package" +// +// If you have dependencies that try to import CSS, esbuild will generate a separate `app.css` file. +// To load it, simply add a second `<link>` to your `root.html.heex` file. + +// Include phoenix_html to handle method=PUT/DELETE in forms and buttons. +import "phoenix_html" +// Establish Phoenix Socket and LiveView configuration. +import {Socket} from "phoenix" +import {LiveSocket} from "phoenix_live_view" +import {hooks as colocatedHooks} from "phoenix-colocated/serviceradar_web_ng" +import topbar from "../vendor/topbar" + +// Custom hooks +const Hooks = { + TimeseriesChart: { + mounted() { + const el = this.el + const svg = el.querySelector('svg') + const tooltip = el.querySelector('[data-tooltip]') + const hoverLine = el.querySelector('[data-hover-line]') + const pointsData = JSON.parse(el.dataset.points || '[]') + + if (!svg || !tooltip || !hoverLine || pointsData.length === 0) return + + const svgContainer = svg.parentElement + + const showTooltip = (e) => { + const rect = svg.getBoundingClientRect() + const x = e.clientX - rect.left + const pct = Math.max(0, Math.min(1, x / rect.width)) + const idx = Math.round(pct * (pointsData.length - 1)) + const point = pointsData[idx] + + if (point) { + const value = typeof point.v === 'number' + ? 
point.v.toFixed(2) + : point.v + tooltip.textContent = `${value} @ ${point.dt}` + tooltip.classList.remove('hidden') + hoverLine.classList.remove('hidden') + + // Position tooltip + const tooltipX = Math.min(rect.width - tooltip.offsetWidth - 8, Math.max(8, x - tooltip.offsetWidth / 2)) + tooltip.style.left = `${tooltipX}px` + tooltip.style.top = '-24px' + + // Position hover line + hoverLine.style.left = `${x}px` + } + } + + const hideTooltip = () => { + tooltip.classList.add('hidden') + hoverLine.classList.add('hidden') + } + + svgContainer.addEventListener('mousemove', showTooltip) + svgContainer.addEventListener('mouseleave', hideTooltip) + + // Store cleanup function + this.cleanup = () => { + svgContainer.removeEventListener('mousemove', showTooltip) + svgContainer.removeEventListener('mouseleave', hideTooltip) + } + }, + destroyed() { + if (this.cleanup) this.cleanup() + } + } +} + +const csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content") +const liveSocket = new LiveSocket("/live", Socket, { + longPollFallbackMs: 2500, + params: {_csrf_token: csrfToken}, + hooks: {...colocatedHooks, ...Hooks}, +}) + +// Show progress bar on live navigation and form submits +topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"}) +window.addEventListener("phx:page-loading-start", _info => topbar.show(300)) +window.addEventListener("phx:page-loading-stop", _info => topbar.hide()) + +// connect if there are any LiveViews on the page +liveSocket.connect() + +// expose liveSocket on window for web console debug logs and latency simulation: +// >> liveSocket.enableDebug() +// >> liveSocket.enableLatencySim(1000) // enabled for duration of browser session +// >> liveSocket.disableLatencySim() +window.liveSocket = liveSocket + +// The lines below enable quality of life phoenix_live_reload +// development features: +// +// 1. stream server logs to the browser console +// 2. click on elements to jump to their definitions in your code editor +// +if (process.env.NODE_ENV === "development") { + window.addEventListener("phx:live_reload:attached", ({detail: reloader}) => { + // Enable server log streaming to client. + // Disable with reloader.disableServerLogs() + reloader.enableServerLogs() + + // Open configured PLUG_EDITOR at file:line of the clicked element's HEEx component + // + // * click with "c" key pressed to open at caller location + // * click with "d" key pressed to open at function component definition location + let keyDown + window.addEventListener("keydown", e => keyDown = e.key) + window.addEventListener("keyup", _e => keyDown = null) + window.addEventListener("click", e => { + if(keyDown === "c"){ + e.preventDefault() + e.stopImmediatePropagation() + reloader.openEditorAtCaller(e.target) + } else if(keyDown === "d"){ + e.preventDefault() + e.stopImmediatePropagation() + reloader.openEditorAtDef(e.target) + } + }, true) + + window.liveReloader = reloader + }) +} + diff --git a/web-ng/assets/tsconfig.json b/web-ng/assets/tsconfig.json new file mode 100644 index 000000000..a9401b623 --- /dev/null +++ b/web-ng/assets/tsconfig.json @@ -0,0 +1,32 @@ +// This file is needed on most editors to enable the intelligent autocompletion +// of LiveView's JavaScript API methods. You can safely delete it if you don't need it. +// +// Note: This file assumes a basic esbuild setup without node_modules. +// We include a generic paths alias to deps to mimic how esbuild resolves +// the Phoenix and LiveView JavaScript assets. 
+// If you have a package.json in your project, you should remove the +// paths configuration and instead add the phoenix dependencies to the +// dependencies section of your package.json: +// +// { +// ... +// "dependencies": { +// ..., +// "phoenix": "../deps/phoenix", +// "phoenix_html": "../deps/phoenix_html", +// "phoenix_live_view": "../deps/phoenix_live_view" +// } +// } +// +// Feel free to adjust this configuration however you need. +{ + "compilerOptions": { + "baseUrl": ".", + "paths": { + "*": ["../deps/*"] + }, + "allowJs": true, + "noEmit": true + }, + "include": ["js/**/*"] +} diff --git a/web-ng/assets/vendor/daisyui-theme.js b/web-ng/assets/vendor/daisyui-theme.js new file mode 100644 index 000000000..169c806a1 --- /dev/null +++ b/web-ng/assets/vendor/daisyui-theme.js @@ -0,0 +1,124 @@ +/** 🌼 + * @license MIT + * daisyUI bundle + * https://daisyui.com/ + */ + +var __defProp = Object.defineProperty; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __moduleCache = /* @__PURE__ */ new WeakMap; +var __toCommonJS = (from) => { + var entry = __moduleCache.get(from), desc; + if (entry) + return entry; + entry = __defProp({}, "__esModule", { value: true }); + if (from && typeof from === "object" || typeof from === "function") + __getOwnPropNames(from).map((key) => !__hasOwnProp.call(entry, key) && __defProp(entry, key, { + get: () => from[key], + enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable + })); + __moduleCache.set(from, entry); + return entry; +}; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { + get: all[name], + enumerable: true, + configurable: true, + set: (newValue) => all[name] = () => newValue + }); +}; + +// packages/daisyui/theme/index.js +var exports_theme = {}; +__export(exports_theme, { + default: () => theme_default +}); +module.exports = __toCommonJS(exports_theme); + +// packages/daisyui/functions/plugin.js +var plugin = { + withOptions: (pluginFunction, configFunction = () => ({})) => { + const optionsFunction = (options) => { + const handler = pluginFunction(options); + const config = configFunction(options); + return { handler, config }; + }; + optionsFunction.__isOptionsFunction = true; + return optionsFunction; + } +}; + +// packages/daisyui/theme/object.js +var object_default = { cyberpunk: { "color-scheme": "light", "--color-base-100": "oklch(94.51% 0.179 104.32)", "--color-base-200": "oklch(91.51% 0.179 104.32)", "--color-base-300": "oklch(85.51% 0.179 104.32)", "--color-base-content": "oklch(0% 0 0)", "--color-primary": "oklch(74.22% 0.209 6.35)", "--color-primary-content": "oklch(14.844% 0.041 6.35)", "--color-secondary": "oklch(83.33% 0.184 204.72)", "--color-secondary-content": "oklch(16.666% 0.036 204.72)", "--color-accent": "oklch(71.86% 0.217 310.43)", "--color-accent-content": "oklch(14.372% 0.043 310.43)", "--color-neutral": "oklch(23.04% 0.065 269.31)", "--color-neutral-content": "oklch(94.51% 0.179 104.32)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "0rem", "--radius-field": "0rem", "--radius-box": "0rem", "--size-selector": "0.25rem", 
"--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, acid: { "color-scheme": "light", "--color-base-100": "oklch(98% 0 0)", "--color-base-200": "oklch(95% 0 0)", "--color-base-300": "oklch(91% 0 0)", "--color-base-content": "oklch(0% 0 0)", "--color-primary": "oklch(71.9% 0.357 330.759)", "--color-primary-content": "oklch(14.38% 0.071 330.759)", "--color-secondary": "oklch(73.37% 0.224 48.25)", "--color-secondary-content": "oklch(14.674% 0.044 48.25)", "--color-accent": "oklch(92.78% 0.264 122.962)", "--color-accent-content": "oklch(18.556% 0.052 122.962)", "--color-neutral": "oklch(21.31% 0.128 278.68)", "--color-neutral-content": "oklch(84.262% 0.025 278.68)", "--color-info": "oklch(60.72% 0.227 252.05)", "--color-info-content": "oklch(12.144% 0.045 252.05)", "--color-success": "oklch(85.72% 0.266 158.53)", "--color-success-content": "oklch(17.144% 0.053 158.53)", "--color-warning": "oklch(91.01% 0.212 100.5)", "--color-warning-content": "oklch(18.202% 0.042 100.5)", "--color-error": "oklch(64.84% 0.293 29.349)", "--color-error-content": "oklch(12.968% 0.058 29.349)", "--radius-selector": "1rem", "--radius-field": "1rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, black: { "color-scheme": "dark", "--color-base-100": "oklch(0% 0 0)", "--color-base-200": "oklch(19% 0 0)", "--color-base-300": "oklch(22% 0 0)", "--color-base-content": "oklch(87.609% 0 0)", "--color-primary": "oklch(35% 0 0)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(35% 0 0)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(35% 0 0)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(35% 0 0)", "--color-neutral-content": "oklch(100% 0 0)", "--color-info": "oklch(45.201% 0.313 264.052)", "--color-info-content": "oklch(89.04% 0.062 264.052)", "--color-success": "oklch(51.975% 0.176 142.495)", "--color-success-content": "oklch(90.395% 0.035 142.495)", "--color-warning": "oklch(96.798% 0.211 109.769)", "--color-warning-content": "oklch(19.359% 0.042 109.769)", "--color-error": "oklch(62.795% 0.257 29.233)", "--color-error-content": "oklch(12.559% 0.051 29.233)", "--radius-selector": "0rem", "--radius-field": "0rem", "--radius-box": "0rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, dark: { "color-scheme": "dark", "--color-base-100": "oklch(25.33% 0.016 252.42)", "--color-base-200": "oklch(23.26% 0.014 253.1)", "--color-base-300": "oklch(21.15% 0.012 254.09)", "--color-base-content": "oklch(97.807% 0.029 256.847)", "--color-primary": "oklch(58% 0.233 277.117)", "--color-primary-content": "oklch(96% 0.018 272.314)", "--color-secondary": "oklch(65% 0.241 354.308)", "--color-secondary-content": "oklch(94% 0.028 342.258)", "--color-accent": "oklch(77% 0.152 181.912)", "--color-accent-content": "oklch(38% 0.063 188.416)", "--color-neutral": "oklch(14% 0.005 285.823)", "--color-neutral-content": "oklch(92% 0.004 286.32)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(76% 0.177 163.223)", "--color-success-content": "oklch(37% 0.077 168.94)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(71% 0.194 13.428)", "--color-error-content": "oklch(27% 0.105 12.094)", "--radius-selector": "0.5rem", "--radius-field": "0.25rem", 
"--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, light: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(98% 0 0)", "--color-base-300": "oklch(95% 0 0)", "--color-base-content": "oklch(21% 0.006 285.885)", "--color-primary": "oklch(45% 0.24 277.023)", "--color-primary-content": "oklch(93% 0.034 272.788)", "--color-secondary": "oklch(65% 0.241 354.308)", "--color-secondary-content": "oklch(94% 0.028 342.258)", "--color-accent": "oklch(77% 0.152 181.912)", "--color-accent-content": "oklch(38% 0.063 188.416)", "--color-neutral": "oklch(14% 0.005 285.823)", "--color-neutral-content": "oklch(92% 0.004 286.32)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(76% 0.177 163.223)", "--color-success-content": "oklch(37% 0.077 168.94)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(71% 0.194 13.428)", "--color-error-content": "oklch(27% 0.105 12.094)", "--radius-selector": "0.5rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, luxury: { "color-scheme": "dark", "--color-base-100": "oklch(14.076% 0.004 285.822)", "--color-base-200": "oklch(20.219% 0.004 308.229)", "--color-base-300": "oklch(23.219% 0.004 308.229)", "--color-base-content": "oklch(75.687% 0.123 76.89)", "--color-primary": "oklch(100% 0 0)", "--color-primary-content": "oklch(20% 0 0)", "--color-secondary": "oklch(27.581% 0.064 261.069)", "--color-secondary-content": "oklch(85.516% 0.012 261.069)", "--color-accent": "oklch(36.674% 0.051 338.825)", "--color-accent-content": "oklch(87.334% 0.01 338.825)", "--color-neutral": "oklch(24.27% 0.057 59.825)", "--color-neutral-content": "oklch(93.203% 0.089 90.861)", "--color-info": "oklch(79.061% 0.121 237.133)", "--color-info-content": "oklch(15.812% 0.024 237.133)", "--color-success": "oklch(78.119% 0.192 132.154)", "--color-success-content": "oklch(15.623% 0.038 132.154)", "--color-warning": "oklch(86.127% 0.136 102.891)", "--color-warning-content": "oklch(17.225% 0.027 102.891)", "--color-error": "oklch(71.753% 0.176 22.568)", "--color-error-content": "oklch(14.35% 0.035 22.568)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, dracula: { "color-scheme": "dark", "--color-base-100": "oklch(28.822% 0.022 277.508)", "--color-base-200": "oklch(26.805% 0.02 277.508)", "--color-base-300": "oklch(24.787% 0.019 277.508)", "--color-base-content": "oklch(97.747% 0.007 106.545)", "--color-primary": "oklch(75.461% 0.183 346.812)", "--color-primary-content": "oklch(15.092% 0.036 346.812)", "--color-secondary": "oklch(74.202% 0.148 301.883)", "--color-secondary-content": "oklch(14.84% 0.029 301.883)", "--color-accent": "oklch(83.392% 0.124 66.558)", "--color-accent-content": "oklch(16.678% 0.024 66.558)", "--color-neutral": "oklch(39.445% 0.032 275.524)", "--color-neutral-content": "oklch(87.889% 0.006 275.524)", "--color-info": "oklch(88.263% 0.093 212.846)", "--color-info-content": "oklch(17.652% 0.018 212.846)", "--color-success": "oklch(87.099% 0.219 148.024)", "--color-success-content": "oklch(17.419% 0.043 148.024)", "--color-warning": "oklch(95.533% 0.134 
112.757)", "--color-warning-content": "oklch(19.106% 0.026 112.757)", "--color-error": "oklch(68.22% 0.206 24.43)", "--color-error-content": "oklch(13.644% 0.041 24.43)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, retro: { "color-scheme": "light", "--color-base-100": "oklch(91.637% 0.034 90.515)", "--color-base-200": "oklch(88.272% 0.049 91.774)", "--color-base-300": "oklch(84.133% 0.065 90.856)", "--color-base-content": "oklch(41% 0.112 45.904)", "--color-primary": "oklch(80% 0.114 19.571)", "--color-primary-content": "oklch(39% 0.141 25.723)", "--color-secondary": "oklch(92% 0.084 155.995)", "--color-secondary-content": "oklch(44% 0.119 151.328)", "--color-accent": "oklch(68% 0.162 75.834)", "--color-accent-content": "oklch(41% 0.112 45.904)", "--color-neutral": "oklch(44% 0.011 73.639)", "--color-neutral-content": "oklch(86% 0.005 56.366)", "--color-info": "oklch(58% 0.158 241.966)", "--color-info-content": "oklch(96% 0.059 95.617)", "--color-success": "oklch(51% 0.096 186.391)", "--color-success-content": "oklch(96% 0.059 95.617)", "--color-warning": "oklch(64% 0.222 41.116)", "--color-warning-content": "oklch(96% 0.059 95.617)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(40% 0.123 38.172)", "--radius-selector": "0.25rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, lofi: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97% 0 0)", "--color-base-300": "oklch(94% 0 0)", "--color-base-content": "oklch(0% 0 0)", "--color-primary": "oklch(15.906% 0 0)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(21.455% 0.001 17.278)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(26.861% 0 0)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(0% 0 0)", "--color-neutral-content": "oklch(100% 0 0)", "--color-info": "oklch(79.54% 0.103 205.9)", "--color-info-content": "oklch(15.908% 0.02 205.9)", "--color-success": "oklch(90.13% 0.153 164.14)", "--color-success-content": "oklch(18.026% 0.03 164.14)", "--color-warning": "oklch(88.37% 0.135 79.94)", "--color-warning-content": "oklch(17.674% 0.027 79.94)", "--color-error": "oklch(78.66% 0.15 28.47)", "--color-error-content": "oklch(15.732% 0.03 28.47)", "--radius-selector": "2rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, valentine: { "color-scheme": "light", "--color-base-100": "oklch(97% 0.014 343.198)", "--color-base-200": "oklch(94% 0.028 342.258)", "--color-base-300": "oklch(89% 0.061 343.231)", "--color-base-content": "oklch(52% 0.223 3.958)", "--color-primary": "oklch(65% 0.241 354.308)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(62% 0.265 303.9)", "--color-secondary-content": "oklch(97% 0.014 308.299)", "--color-accent": "oklch(82% 0.111 230.318)", "--color-accent-content": "oklch(39% 0.09 240.876)", "--color-neutral": "oklch(40% 0.153 2.432)", "--color-neutral-content": "oklch(89% 0.061 343.231)", "--color-info": "oklch(86% 0.127 207.078)", "--color-info-content": "oklch(44% 0.11 240.79)", "--color-success": "oklch(84% 0.143 164.978)", "--color-success-content": "oklch(43% 0.095 
166.913)", "--color-warning": "oklch(75% 0.183 55.934)", "--color-warning-content": "oklch(26% 0.079 36.259)", "--color-error": "oklch(63% 0.237 25.331)", "--color-error-content": "oklch(97% 0.013 17.38)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, nord: { "color-scheme": "light", "--color-base-100": "oklch(95.127% 0.007 260.731)", "--color-base-200": "oklch(93.299% 0.01 261.788)", "--color-base-300": "oklch(89.925% 0.016 262.749)", "--color-base-content": "oklch(32.437% 0.022 264.182)", "--color-primary": "oklch(59.435% 0.077 254.027)", "--color-primary-content": "oklch(11.887% 0.015 254.027)", "--color-secondary": "oklch(69.651% 0.059 248.687)", "--color-secondary-content": "oklch(13.93% 0.011 248.687)", "--color-accent": "oklch(77.464% 0.062 217.469)", "--color-accent-content": "oklch(15.492% 0.012 217.469)", "--color-neutral": "oklch(45.229% 0.035 264.131)", "--color-neutral-content": "oklch(89.925% 0.016 262.749)", "--color-info": "oklch(69.207% 0.062 332.664)", "--color-info-content": "oklch(13.841% 0.012 332.664)", "--color-success": "oklch(76.827% 0.074 131.063)", "--color-success-content": "oklch(15.365% 0.014 131.063)", "--color-warning": "oklch(85.486% 0.089 84.093)", "--color-warning-content": "oklch(17.097% 0.017 84.093)", "--color-error": "oklch(60.61% 0.12 15.341)", "--color-error-content": "oklch(12.122% 0.024 15.341)", "--radius-selector": "1rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, lemonade: { "color-scheme": "light", "--color-base-100": "oklch(98.71% 0.02 123.72)", "--color-base-200": "oklch(91.8% 0.018 123.72)", "--color-base-300": "oklch(84.89% 0.017 123.72)", "--color-base-content": "oklch(19.742% 0.004 123.72)", "--color-primary": "oklch(58.92% 0.199 134.6)", "--color-primary-content": "oklch(11.784% 0.039 134.6)", "--color-secondary": "oklch(77.75% 0.196 111.09)", "--color-secondary-content": "oklch(15.55% 0.039 111.09)", "--color-accent": "oklch(85.39% 0.201 100.73)", "--color-accent-content": "oklch(17.078% 0.04 100.73)", "--color-neutral": "oklch(30.98% 0.075 108.6)", "--color-neutral-content": "oklch(86.196% 0.015 108.6)", "--color-info": "oklch(86.19% 0.047 224.14)", "--color-info-content": "oklch(17.238% 0.009 224.14)", "--color-success": "oklch(86.19% 0.047 157.85)", "--color-success-content": "oklch(17.238% 0.009 157.85)", "--color-warning": "oklch(86.19% 0.047 102.15)", "--color-warning-content": "oklch(17.238% 0.009 102.15)", "--color-error": "oklch(86.19% 0.047 25.85)", "--color-error-content": "oklch(17.238% 0.009 25.85)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, garden: { "color-scheme": "light", "--color-base-100": "oklch(92.951% 0.002 17.197)", "--color-base-200": "oklch(86.445% 0.002 17.197)", "--color-base-300": "oklch(79.938% 0.001 17.197)", "--color-base-content": "oklch(16.961% 0.001 17.32)", "--color-primary": "oklch(62.45% 0.278 3.836)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(48.495% 0.11 355.095)", "--color-secondary-content": "oklch(89.699% 0.022 355.095)", "--color-accent": "oklch(56.273% 0.054 154.39)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(24.155% 0.049 
89.07)", "--color-neutral-content": "oklch(92.951% 0.002 17.197)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, aqua: { "color-scheme": "dark", "--color-base-100": "oklch(37% 0.146 265.522)", "--color-base-200": "oklch(28% 0.091 267.935)", "--color-base-300": "oklch(22% 0.091 267.935)", "--color-base-content": "oklch(90% 0.058 230.902)", "--color-primary": "oklch(85.661% 0.144 198.645)", "--color-primary-content": "oklch(40.124% 0.068 197.603)", "--color-secondary": "oklch(60.682% 0.108 309.782)", "--color-secondary-content": "oklch(96% 0.016 293.756)", "--color-accent": "oklch(93.426% 0.102 94.555)", "--color-accent-content": "oklch(18.685% 0.02 94.555)", "--color-neutral": "oklch(27% 0.146 265.522)", "--color-neutral-content": "oklch(80% 0.146 265.522)", "--color-info": "oklch(54.615% 0.215 262.88)", "--color-info-content": "oklch(90.923% 0.043 262.88)", "--color-success": "oklch(62.705% 0.169 149.213)", "--color-success-content": "oklch(12.541% 0.033 149.213)", "--color-warning": "oklch(66.584% 0.157 58.318)", "--color-warning-content": "oklch(27% 0.077 45.635)", "--color-error": "oklch(73.95% 0.19 27.33)", "--color-error-content": "oklch(14.79% 0.038 27.33)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, corporate: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(93% 0 0)", "--color-base-300": "oklch(86% 0 0)", "--color-base-content": "oklch(22.389% 0.031 278.072)", "--color-primary": "oklch(58% 0.158 241.966)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(55% 0.046 257.417)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(60% 0.118 184.704)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(0% 0 0)", "--color-neutral-content": "oklch(100% 0 0)", "--color-info": "oklch(60% 0.126 221.723)", "--color-info-content": "oklch(100% 0 0)", "--color-success": "oklch(62% 0.194 149.214)", "--color-success-content": "oklch(100% 0 0)", "--color-warning": "oklch(85% 0.199 91.936)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "0.25rem", "--radius-field": "0.25rem", "--radius-box": "0.25rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, pastel: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(98.462% 0.001 247.838)", "--color-base-300": "oklch(92.462% 0.001 247.838)", "--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(90% 0.063 306.703)", "--color-primary-content": "oklch(49% 0.265 301.924)", "--color-secondary": "oklch(89% 0.058 10.001)", "--color-secondary-content": "oklch(51% 0.222 16.935)", "--color-accent": "oklch(90% 0.093 164.15)", "--color-accent-content": "oklch(50% 0.118 165.612)", "--color-neutral": "oklch(55% 
0.046 257.417)", "--color-neutral-content": "oklch(92% 0.013 255.508)", "--color-info": "oklch(86% 0.127 207.078)", "--color-info-content": "oklch(52% 0.105 223.128)", "--color-success": "oklch(87% 0.15 154.449)", "--color-success-content": "oklch(52% 0.154 150.069)", "--color-warning": "oklch(83% 0.128 66.29)", "--color-warning-content": "oklch(55% 0.195 38.402)", "--color-error": "oklch(80% 0.114 19.571)", "--color-error-content": "oklch(50% 0.213 27.518)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "0", "--noise": "0" }, bumblebee: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97% 0 0)", "--color-base-300": "oklch(92% 0 0)", "--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(85% 0.199 91.936)", "--color-primary-content": "oklch(42% 0.095 57.708)", "--color-secondary": "oklch(75% 0.183 55.934)", "--color-secondary-content": "oklch(40% 0.123 38.172)", "--color-accent": "oklch(0% 0 0)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(37% 0.01 67.558)", "--color-neutral-content": "oklch(92% 0.003 48.717)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(39% 0.09 240.876)", "--color-success": "oklch(76% 0.177 163.223)", "--color-success-content": "oklch(37% 0.077 168.94)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(39% 0.141 25.723)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, coffee: { "color-scheme": "dark", "--color-base-100": "oklch(24% 0.023 329.708)", "--color-base-200": "oklch(21% 0.021 329.708)", "--color-base-300": "oklch(16% 0.019 329.708)", "--color-base-content": "oklch(72.354% 0.092 79.129)", "--color-primary": "oklch(71.996% 0.123 62.756)", "--color-primary-content": "oklch(14.399% 0.024 62.756)", "--color-secondary": "oklch(34.465% 0.029 199.194)", "--color-secondary-content": "oklch(86.893% 0.005 199.194)", "--color-accent": "oklch(42.621% 0.074 224.389)", "--color-accent-content": "oklch(88.524% 0.014 224.389)", "--color-neutral": "oklch(16.51% 0.015 326.261)", "--color-neutral-content": "oklch(83.302% 0.003 326.261)", "--color-info": "oklch(79.49% 0.063 184.558)", "--color-info-content": "oklch(15.898% 0.012 184.558)", "--color-success": "oklch(74.722% 0.072 131.116)", "--color-success-content": "oklch(14.944% 0.014 131.116)", "--color-warning": "oklch(88.15% 0.14 87.722)", "--color-warning-content": "oklch(17.63% 0.028 87.722)", "--color-error": "oklch(77.318% 0.128 31.871)", "--color-error-content": "oklch(15.463% 0.025 31.871)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, silk: { "color-scheme": "light", "--color-base-100": "oklch(97% 0.0035 67.78)", "--color-base-200": "oklch(95% 0.0081 61.42)", "--color-base-300": "oklch(90% 0.0081 61.42)", "--color-base-content": "oklch(40% 0.0081 61.42)", "--color-primary": "oklch(23.27% 0.0249 284.3)", "--color-primary-content": "oklch(94.22% 0.2505 117.44)", "--color-secondary": "oklch(23.27% 0.0249 284.3)", "--color-secondary-content": "oklch(73.92% 0.2135 50.94)", 
"--color-accent": "oklch(23.27% 0.0249 284.3)", "--color-accent-content": "oklch(88.92% 0.2061 189.9)", "--color-neutral": "oklch(20% 0 0)", "--color-neutral-content": "oklch(80% 0.0081 61.42)", "--color-info": "oklch(80.39% 0.1148 241.68)", "--color-info-content": "oklch(30.39% 0.1148 241.68)", "--color-success": "oklch(83.92% 0.0901 136.87)", "--color-success-content": "oklch(23.92% 0.0901 136.87)", "--color-warning": "oklch(83.92% 0.1085 80)", "--color-warning-content": "oklch(43.92% 0.1085 80)", "--color-error": "oklch(75.1% 0.1814 22.37)", "--color-error-content": "oklch(35.1% 0.1814 22.37)", "--radius-selector": "2rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "1", "--noise": "0" }, sunset: { "color-scheme": "dark", "--color-base-100": "oklch(22% 0.019 237.69)", "--color-base-200": "oklch(20% 0.019 237.69)", "--color-base-300": "oklch(18% 0.019 237.69)", "--color-base-content": "oklch(77.383% 0.043 245.096)", "--color-primary": "oklch(74.703% 0.158 39.947)", "--color-primary-content": "oklch(14.94% 0.031 39.947)", "--color-secondary": "oklch(72.537% 0.177 2.72)", "--color-secondary-content": "oklch(14.507% 0.035 2.72)", "--color-accent": "oklch(71.294% 0.166 299.844)", "--color-accent-content": "oklch(14.258% 0.033 299.844)", "--color-neutral": "oklch(26% 0.019 237.69)", "--color-neutral-content": "oklch(70% 0.019 237.69)", "--color-info": "oklch(85.559% 0.085 206.015)", "--color-info-content": "oklch(17.111% 0.017 206.015)", "--color-success": "oklch(85.56% 0.085 144.778)", "--color-success-content": "oklch(17.112% 0.017 144.778)", "--color-warning": "oklch(85.569% 0.084 74.427)", "--color-warning-content": "oklch(17.113% 0.016 74.427)", "--color-error": "oklch(85.511% 0.078 16.886)", "--color-error-content": "oklch(17.102% 0.015 16.886)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, synthwave: { "color-scheme": "dark", "--color-base-100": "oklch(15% 0.09 281.288)", "--color-base-200": "oklch(20% 0.09 281.288)", "--color-base-300": "oklch(25% 0.09 281.288)", "--color-base-content": "oklch(78% 0.115 274.713)", "--color-primary": "oklch(71% 0.202 349.761)", "--color-primary-content": "oklch(28% 0.109 3.907)", "--color-secondary": "oklch(82% 0.111 230.318)", "--color-secondary-content": "oklch(29% 0.066 243.157)", "--color-accent": "oklch(75% 0.183 55.934)", "--color-accent-content": "oklch(26% 0.079 36.259)", "--color-neutral": "oklch(45% 0.24 277.023)", "--color-neutral-content": "oklch(87% 0.065 274.039)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(77% 0.152 181.912)", "--color-success-content": "oklch(27% 0.046 192.524)", "--color-warning": "oklch(90% 0.182 98.111)", "--color-warning-content": "oklch(42% 0.095 57.708)", "--color-error": "oklch(73.7% 0.121 32.639)", "--color-error-content": "oklch(23.501% 0.096 290.329)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, dim: { "color-scheme": "dark", "--color-base-100": "oklch(30.857% 0.023 264.149)", "--color-base-200": "oklch(28.036% 0.019 264.182)", "--color-base-300": "oklch(26.346% 0.018 262.177)", "--color-base-content": "oklch(82.901% 0.031 222.959)", "--color-primary": 
"oklch(86.133% 0.141 139.549)", "--color-primary-content": "oklch(17.226% 0.028 139.549)", "--color-secondary": "oklch(73.375% 0.165 35.353)", "--color-secondary-content": "oklch(14.675% 0.033 35.353)", "--color-accent": "oklch(74.229% 0.133 311.379)", "--color-accent-content": "oklch(14.845% 0.026 311.379)", "--color-neutral": "oklch(24.731% 0.02 264.094)", "--color-neutral-content": "oklch(82.901% 0.031 222.959)", "--color-info": "oklch(86.078% 0.142 206.182)", "--color-info-content": "oklch(17.215% 0.028 206.182)", "--color-success": "oklch(86.171% 0.142 166.534)", "--color-success-content": "oklch(17.234% 0.028 166.534)", "--color-warning": "oklch(86.163% 0.142 94.818)", "--color-warning-content": "oklch(17.232% 0.028 94.818)", "--color-error": "oklch(82.418% 0.099 33.756)", "--color-error-content": "oklch(16.483% 0.019 33.756)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, abyss: { "color-scheme": "dark", "--color-base-100": "oklch(20% 0.08 209)", "--color-base-200": "oklch(15% 0.08 209)", "--color-base-300": "oklch(10% 0.08 209)", "--color-base-content": "oklch(90% 0.076 70.697)", "--color-primary": "oklch(92% 0.2653 125)", "--color-primary-content": "oklch(50% 0.2653 125)", "--color-secondary": "oklch(83.27% 0.0764 298.3)", "--color-secondary-content": "oklch(43.27% 0.0764 298.3)", "--color-accent": "oklch(43% 0 0)", "--color-accent-content": "oklch(98% 0 0)", "--color-neutral": "oklch(30% 0.08 209)", "--color-neutral-content": "oklch(90% 0.076 70.697)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(79% 0.209 151.711)", "--color-success-content": "oklch(26% 0.065 152.934)", "--color-warning": "oklch(84.8% 0.1962 84.62)", "--color-warning-content": "oklch(44.8% 0.1962 84.62)", "--color-error": "oklch(65% 0.1985 24.22)", "--color-error-content": "oklch(27% 0.1985 24.22)", "--radius-selector": "2rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, forest: { "color-scheme": "dark", "--color-base-100": "oklch(20.84% 0.008 17.911)", "--color-base-200": "oklch(18.522% 0.007 17.911)", "--color-base-300": "oklch(16.203% 0.007 17.911)", "--color-base-content": "oklch(83.768% 0.001 17.911)", "--color-primary": "oklch(68.628% 0.185 148.958)", "--color-primary-content": "oklch(0% 0 0)", "--color-secondary": "oklch(69.776% 0.135 168.327)", "--color-secondary-content": "oklch(13.955% 0.027 168.327)", "--color-accent": "oklch(70.628% 0.119 185.713)", "--color-accent-content": "oklch(14.125% 0.023 185.713)", "--color-neutral": "oklch(30.698% 0.039 171.364)", "--color-neutral-content": "oklch(86.139% 0.007 171.364)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, night: { "color-scheme": "dark", "--color-base-100": "oklch(20.768% 0.039 265.754)", "--color-base-200": "oklch(19.314% 
0.037 265.754)", "--color-base-300": "oklch(17.86% 0.034 265.754)", "--color-base-content": "oklch(84.153% 0.007 265.754)", "--color-primary": "oklch(75.351% 0.138 232.661)", "--color-primary-content": "oklch(15.07% 0.027 232.661)", "--color-secondary": "oklch(68.011% 0.158 276.934)", "--color-secondary-content": "oklch(13.602% 0.031 276.934)", "--color-accent": "oklch(72.36% 0.176 350.048)", "--color-accent-content": "oklch(14.472% 0.035 350.048)", "--color-neutral": "oklch(27.949% 0.036 260.03)", "--color-neutral-content": "oklch(85.589% 0.007 260.03)", "--color-info": "oklch(68.455% 0.148 237.251)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(78.452% 0.132 181.911)", "--color-success-content": "oklch(15.69% 0.026 181.911)", "--color-warning": "oklch(83.242% 0.139 82.95)", "--color-warning-content": "oklch(16.648% 0.027 82.95)", "--color-error": "oklch(71.785% 0.17 13.118)", "--color-error-content": "oklch(14.357% 0.034 13.118)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, caramellatte: { "color-scheme": "light", "--color-base-100": "oklch(98% 0.016 73.684)", "--color-base-200": "oklch(95% 0.038 75.164)", "--color-base-300": "oklch(90% 0.076 70.697)", "--color-base-content": "oklch(40% 0.123 38.172)", "--color-primary": "oklch(0% 0 0)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(22.45% 0.075 37.85)", "--color-secondary-content": "oklch(90% 0.076 70.697)", "--color-accent": "oklch(46.44% 0.111 37.85)", "--color-accent-content": "oklch(90% 0.076 70.697)", "--color-neutral": "oklch(55% 0.195 38.402)", "--color-neutral-content": "oklch(98% 0.016 73.684)", "--color-info": "oklch(42% 0.199 265.638)", "--color-info-content": "oklch(90% 0.076 70.697)", "--color-success": "oklch(43% 0.095 166.913)", "--color-success-content": "oklch(90% 0.076 70.697)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(39% 0.141 25.723)", "--radius-selector": "2rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "1", "--noise": "1" }, autumn: { "color-scheme": "light", "--color-base-100": "oklch(95.814% 0 0)", "--color-base-200": "oklch(89.107% 0 0)", "--color-base-300": "oklch(82.4% 0 0)", "--color-base-content": "oklch(19.162% 0 0)", "--color-primary": "oklch(40.723% 0.161 17.53)", "--color-primary-content": "oklch(88.144% 0.032 17.53)", "--color-secondary": "oklch(61.676% 0.169 23.865)", "--color-secondary-content": "oklch(12.335% 0.033 23.865)", "--color-accent": "oklch(73.425% 0.094 60.729)", "--color-accent-content": "oklch(14.685% 0.018 60.729)", "--color-neutral": "oklch(54.367% 0.037 51.902)", "--color-neutral-content": "oklch(90.873% 0.007 51.902)", "--color-info": "oklch(69.224% 0.097 207.284)", "--color-info-content": "oklch(13.844% 0.019 207.284)", "--color-success": "oklch(60.995% 0.08 174.616)", "--color-success-content": "oklch(12.199% 0.016 174.616)", "--color-warning": "oklch(70.081% 0.164 56.844)", "--color-warning-content": "oklch(14.016% 0.032 56.844)", "--color-error": "oklch(53.07% 0.241 24.16)", "--color-error-content": "oklch(90.614% 0.048 24.16)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": 
"0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, emerald: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(93% 0 0)", "--color-base-300": "oklch(86% 0 0)", "--color-base-content": "oklch(35.519% 0.032 262.988)", "--color-primary": "oklch(76.662% 0.135 153.45)", "--color-primary-content": "oklch(33.387% 0.04 162.24)", "--color-secondary": "oklch(61.302% 0.202 261.294)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(72.772% 0.149 33.2)", "--color-accent-content": "oklch(0% 0 0)", "--color-neutral": "oklch(35.519% 0.032 262.988)", "--color-neutral-content": "oklch(98.462% 0.001 247.838)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, cupcake: { "color-scheme": "light", "--color-base-100": "oklch(97.788% 0.004 56.375)", "--color-base-200": "oklch(93.982% 0.007 61.449)", "--color-base-300": "oklch(91.586% 0.006 53.44)", "--color-base-content": "oklch(23.574% 0.066 313.189)", "--color-primary": "oklch(85% 0.138 181.071)", "--color-primary-content": "oklch(43% 0.078 188.216)", "--color-secondary": "oklch(89% 0.061 343.231)", "--color-secondary-content": "oklch(45% 0.187 3.815)", "--color-accent": "oklch(90% 0.076 70.697)", "--color-accent-content": "oklch(47% 0.157 37.304)", "--color-neutral": "oklch(27% 0.006 286.033)", "--color-neutral-content": "oklch(92% 0.004 286.32)", "--color-info": "oklch(68% 0.169 237.323)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(69% 0.17 162.48)", "--color-success-content": "oklch(26% 0.051 172.552)", "--color-warning": "oklch(79% 0.184 86.047)", "--color-warning-content": "oklch(28% 0.066 53.813)", "--color-error": "oklch(64% 0.246 16.439)", "--color-error-content": "oklch(27% 0.105 12.094)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "1", "--noise": "0" }, cmyk: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(95% 0 0)", "--color-base-300": "oklch(90% 0 0)", "--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(71.772% 0.133 239.443)", "--color-primary-content": "oklch(14.354% 0.026 239.443)", "--color-secondary": "oklch(64.476% 0.202 359.339)", "--color-secondary-content": "oklch(12.895% 0.04 359.339)", "--color-accent": "oklch(94.228% 0.189 105.306)", "--color-accent-content": "oklch(18.845% 0.037 105.306)", "--color-neutral": "oklch(21.778% 0 0)", "--color-neutral-content": "oklch(84.355% 0 0)", "--color-info": "oklch(68.475% 0.094 217.284)", "--color-info-content": "oklch(13.695% 0.018 217.284)", "--color-success": "oklch(46.949% 0.162 321.406)", "--color-success-content": "oklch(89.389% 0.032 321.406)", "--color-warning": "oklch(71.236% 0.159 52.023)", "--color-warning-content": "oklch(14.247% 0.031 52.023)", "--color-error": "oklch(62.013% 0.208 28.717)", "--color-error-content": "oklch(12.402% 0.041 28.717)", "--radius-selector": "1rem", "--radius-field": "0.5rem", 
"--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, business: { "color-scheme": "dark", "--color-base-100": "oklch(24.353% 0 0)", "--color-base-200": "oklch(22.648% 0 0)", "--color-base-300": "oklch(20.944% 0 0)", "--color-base-content": "oklch(84.87% 0 0)", "--color-primary": "oklch(41.703% 0.099 251.473)", "--color-primary-content": "oklch(88.34% 0.019 251.473)", "--color-secondary": "oklch(64.092% 0.027 229.389)", "--color-secondary-content": "oklch(12.818% 0.005 229.389)", "--color-accent": "oklch(67.271% 0.167 35.791)", "--color-accent-content": "oklch(13.454% 0.033 35.791)", "--color-neutral": "oklch(27.441% 0.013 253.041)", "--color-neutral-content": "oklch(85.488% 0.002 253.041)", "--color-info": "oklch(62.616% 0.143 240.033)", "--color-info-content": "oklch(12.523% 0.028 240.033)", "--color-success": "oklch(70.226% 0.094 156.596)", "--color-success-content": "oklch(14.045% 0.018 156.596)", "--color-warning": "oklch(77.482% 0.115 81.519)", "--color-warning-content": "oklch(15.496% 0.023 81.519)", "--color-error": "oklch(51.61% 0.146 29.674)", "--color-error-content": "oklch(90.322% 0.029 29.674)", "--radius-selector": "0rem", "--radius-field": "0.25rem", "--radius-box": "0.25rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, winter: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97.466% 0.011 259.822)", "--color-base-300": "oklch(93.268% 0.016 262.751)", "--color-base-content": "oklch(41.886% 0.053 255.824)", "--color-primary": "oklch(56.86% 0.255 257.57)", "--color-primary-content": "oklch(91.372% 0.051 257.57)", "--color-secondary": "oklch(42.551% 0.161 282.339)", "--color-secondary-content": "oklch(88.51% 0.032 282.339)", "--color-accent": "oklch(59.939% 0.191 335.171)", "--color-accent-content": "oklch(11.988% 0.038 335.171)", "--color-neutral": "oklch(19.616% 0.063 257.651)", "--color-neutral-content": "oklch(83.923% 0.012 257.651)", "--color-info": "oklch(88.127% 0.085 214.515)", "--color-info-content": "oklch(17.625% 0.017 214.515)", "--color-success": "oklch(80.494% 0.077 197.823)", "--color-success-content": "oklch(16.098% 0.015 197.823)", "--color-warning": "oklch(89.172% 0.045 71.47)", "--color-warning-content": "oklch(17.834% 0.009 71.47)", "--color-error": "oklch(73.092% 0.11 20.076)", "--color-error-content": "oklch(14.618% 0.022 20.076)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, halloween: { "color-scheme": "dark", "--color-base-100": "oklch(21% 0.006 56.043)", "--color-base-200": "oklch(14% 0.004 49.25)", "--color-base-300": "oklch(0% 0 0)", "--color-base-content": "oklch(84.955% 0 0)", "--color-primary": "oklch(77.48% 0.204 60.62)", "--color-primary-content": "oklch(19.693% 0.004 196.779)", "--color-secondary": "oklch(45.98% 0.248 305.03)", "--color-secondary-content": "oklch(89.196% 0.049 305.03)", "--color-accent": "oklch(64.8% 0.223 136.073)", "--color-accent-content": "oklch(0% 0 0)", "--color-neutral": "oklch(24.371% 0.046 65.681)", "--color-neutral-content": "oklch(84.874% 0.009 65.681)", "--color-info": "oklch(54.615% 0.215 262.88)", "--color-info-content": "oklch(90.923% 0.043 262.88)", "--color-success": "oklch(62.705% 0.169 149.213)", "--color-success-content": "oklch(12.541% 0.033 149.213)", "--color-warning": 
"oklch(66.584% 0.157 58.318)", "--color-warning-content": "oklch(13.316% 0.031 58.318)", "--color-error": "oklch(65.72% 0.199 27.33)", "--color-error-content": "oklch(13.144% 0.039 27.33)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, fantasy: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(93% 0 0)", "--color-base-300": "oklch(86% 0 0)", "--color-base-content": "oklch(27.807% 0.029 256.847)", "--color-primary": "oklch(37.45% 0.189 325.02)", "--color-primary-content": "oklch(87.49% 0.037 325.02)", "--color-secondary": "oklch(53.92% 0.162 241.36)", "--color-secondary-content": "oklch(90.784% 0.032 241.36)", "--color-accent": "oklch(75.98% 0.204 56.72)", "--color-accent-content": "oklch(15.196% 0.04 56.72)", "--color-neutral": "oklch(27.807% 0.029 256.847)", "--color-neutral-content": "oklch(85.561% 0.005 256.847)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, wireframe: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97% 0 0)", "--color-base-300": "oklch(94% 0 0)", "--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(87% 0 0)", "--color-primary-content": "oklch(26% 0 0)", "--color-secondary": "oklch(87% 0 0)", "--color-secondary-content": "oklch(26% 0 0)", "--color-accent": "oklch(87% 0 0)", "--color-accent-content": "oklch(26% 0 0)", "--color-neutral": "oklch(87% 0 0)", "--color-neutral-content": "oklch(26% 0 0)", "--color-info": "oklch(44% 0.11 240.79)", "--color-info-content": "oklch(90% 0.058 230.902)", "--color-success": "oklch(43% 0.095 166.913)", "--color-success-content": "oklch(90% 0.093 164.15)", "--color-warning": "oklch(47% 0.137 46.201)", "--color-warning-content": "oklch(92% 0.12 95.746)", "--color-error": "oklch(44% 0.177 26.899)", "--color-error-content": "oklch(88% 0.062 18.334)", "--radius-selector": "0rem", "--radius-field": "0.25rem", "--radius-box": "0.25rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" } }; + +// packages/daisyui/theme/index.js +var theme_default = plugin.withOptions((options = {}) => { + return ({ addBase }) => { + const { + name = "custom-theme", + default: isDefault = false, + prefersdark = false, + "color-scheme": colorScheme = "normal", + root = ":root", + ...customThemeTokens + } = options; + let selector = `${root}:has(input.theme-controller[value=${name}]:checked),[data-theme="${name}"]`; + if (isDefault) { + selector = `:where(${root}),${selector}`; + } + let themeTokens = { ...customThemeTokens }; + if (object_default[name]) { + const builtinTheme = object_default[name]; + themeTokens = { + ...builtinTheme, + ...customThemeTokens, + "color-scheme": colorScheme || builtinTheme.colorScheme + }; + } + const baseStyles = { + [selector]: { + "color-scheme": themeTokens["color-scheme"] || colorScheme, + ...themeTokens + } + }; + if (prefersdark) { + 
addBase({ + "@media (prefers-color-scheme: dark)": { + [root]: baseStyles[selector] + } + }); + } + addBase(baseStyles); + }; +}); + + +/* + + MIT License + + Copyright (c) 2020 Pouya Saadeghi – https://daisyui.com + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +*/ diff --git a/web-ng/assets/vendor/daisyui.js b/web-ng/assets/vendor/daisyui.js new file mode 100644 index 000000000..46bf6bf02 --- /dev/null +++ b/web-ng/assets/vendor/daisyui.js @@ -0,0 +1,1031 @@ +/** 🌼 + * @license MIT + * daisyUI bundle + * https://daisyui.com/ + */ + +var __defProp = Object.defineProperty; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __moduleCache = /* @__PURE__ */ new WeakMap; +var __toCommonJS = (from) => { + var entry = __moduleCache.get(from), desc; + if (entry) + return entry; + entry = __defProp({}, "__esModule", { value: true }); + if (from && typeof from === "object" || typeof from === "function") + __getOwnPropNames(from).map((key) => !__hasOwnProp.call(entry, key) && __defProp(entry, key, { + get: () => from[key], + enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable + })); + __moduleCache.set(from, entry); + return entry; +}; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { + get: all[name], + enumerable: true, + configurable: true, + set: (newValue) => all[name] = () => newValue + }); +}; + +// packages/daisyui/index.js +var exports_daisyui = {}; +__export(exports_daisyui, { + default: () => daisyui_default +}); +module.exports = __toCommonJS(exports_daisyui); + +// packages/daisyui/functions/themeOrder.js +var themeOrder_default = [ + "light", + "dark", + "cupcake", + "bumblebee", + "emerald", + "corporate", + "synthwave", + "retro", + "cyberpunk", + "valentine", + "halloween", + "garden", + "forest", + "aqua", + "lofi", + "pastel", + "fantasy", + "wireframe", + "black", + "luxury", + "dracula", + "cmyk", + "autumn", + "business", + "acid", + "lemonade", + "night", + "coffee", + "winter", + "dim", + "nord", + "sunset", + "caramellatte", + "abyss", + "silk" +]; + +// packages/daisyui/functions/pluginOptionsHandler.js +var pluginOptionsHandler = (() => { + let firstRun = true; + return (options, addBase, themesObject, packageVersion) => { + const { + logs = true, + root = ":root", + themes = ["light --default", "dark --prefersdark"], + include, + exclude, + prefix = "" + } = options || {}; + if (logs !== false && firstRun) { + 
console.log(`${atob("Lyoh")} ${decodeURIComponent("%F0%9F%8C%BC")} ${atob("ZGFpc3lVSQ==")} ${packageVersion} ${atob("Ki8=")}`); + firstRun = false; + } + const applyTheme = (themeName, flags) => { + const theme = themesObject[themeName]; + if (theme) { + let selector = `${root}:has(input.theme-controller[value=${themeName}]:checked),[data-theme=${themeName}]`; + if (flags.includes("--default")) { + selector = `:where(${root}),${selector}`; + } + addBase({ [selector]: theme }); + if (flags.includes("--prefersdark")) { + addBase({ "@media (prefers-color-scheme: dark)": { [root]: theme } }); + } + } + }; + if (themes === "all") { + if (themesObject["light"]) { + applyTheme("light", ["--default"]); + } + if (themesObject["dark"]) { + addBase({ "@media (prefers-color-scheme: dark)": { [root]: themesObject["dark"] } }); + } + themeOrder_default.forEach((themeName) => { + if (themesObject[themeName]) { + applyTheme(themeName, []); + } + }); + } else if (themes) { + const themeArray = Array.isArray(themes) ? themes : [themes]; + if (themeArray.length === 1 && themeArray[0].includes("--default")) { + const [themeName, ...flags] = themeArray[0].split(" "); + applyTheme(themeName, flags); + return { include, exclude, prefix }; + } + themeArray.forEach((themeOption) => { + const [themeName, ...flags] = themeOption.split(" "); + if (flags.includes("--default")) { + applyTheme(themeName, ["--default"]); + } + }); + themeArray.forEach((themeOption) => { + const [themeName, ...flags] = themeOption.split(" "); + if (flags.includes("--prefersdark")) { + addBase({ "@media (prefers-color-scheme: dark)": { [root]: themesObject[themeName] } }); + } + }); + themeArray.forEach((themeOption) => { + const [themeName] = themeOption.split(" "); + applyTheme(themeName, []); + }); + } + return { include, exclude, prefix }; + }; +})(); + +// packages/daisyui/functions/plugin.js +var plugin = { + withOptions: (pluginFunction, configFunction = () => ({})) => { + const optionsFunction = (options) => { + const handler = pluginFunction(options); + const config = configFunction(options); + return { handler, config }; + }; + optionsFunction.__isOptionsFunction = true; + return optionsFunction; + } +}; + +// packages/daisyui/functions/variables.js +var variables_default = { + colors: { + "base-100": "var(--color-base-100)", + "base-200": "var(--color-base-200)", + "base-300": "var(--color-base-300)", + "base-content": "var(--color-base-content)", + primary: "var(--color-primary)", + "primary-content": "var(--color-primary-content)", + secondary: "var(--color-secondary)", + "secondary-content": "var(--color-secondary-content)", + accent: "var(--color-accent)", + "accent-content": "var(--color-accent-content)", + neutral: "var(--color-neutral)", + "neutral-content": "var(--color-neutral-content)", + info: "var(--color-info)", + "info-content": "var(--color-info-content)", + success: "var(--color-success)", + "success-content": "var(--color-success-content)", + warning: "var(--color-warning)", + "warning-content": "var(--color-warning-content)", + error: "var(--color-error)", + "error-content": "var(--color-error-content)" + }, + borderRadius: { + selector: "var(--radius-selector)", + field: "var(--radius-field)", + box: "var(--radius-box)" + } +}; + +// packages/daisyui/theme/object.js +var object_default = { cyberpunk: { "color-scheme": "light", "--color-base-100": "oklch(94.51% 0.179 104.32)", "--color-base-200": "oklch(91.51% 0.179 104.32)", "--color-base-300": "oklch(85.51% 0.179 104.32)", "--color-base-content": "oklch(0% 0 
0)", "--color-primary": "oklch(74.22% 0.209 6.35)", "--color-primary-content": "oklch(14.844% 0.041 6.35)", "--color-secondary": "oklch(83.33% 0.184 204.72)", "--color-secondary-content": "oklch(16.666% 0.036 204.72)", "--color-accent": "oklch(71.86% 0.217 310.43)", "--color-accent-content": "oklch(14.372% 0.043 310.43)", "--color-neutral": "oklch(23.04% 0.065 269.31)", "--color-neutral-content": "oklch(94.51% 0.179 104.32)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "0rem", "--radius-field": "0rem", "--radius-box": "0rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, acid: { "color-scheme": "light", "--color-base-100": "oklch(98% 0 0)", "--color-base-200": "oklch(95% 0 0)", "--color-base-300": "oklch(91% 0 0)", "--color-base-content": "oklch(0% 0 0)", "--color-primary": "oklch(71.9% 0.357 330.759)", "--color-primary-content": "oklch(14.38% 0.071 330.759)", "--color-secondary": "oklch(73.37% 0.224 48.25)", "--color-secondary-content": "oklch(14.674% 0.044 48.25)", "--color-accent": "oklch(92.78% 0.264 122.962)", "--color-accent-content": "oklch(18.556% 0.052 122.962)", "--color-neutral": "oklch(21.31% 0.128 278.68)", "--color-neutral-content": "oklch(84.262% 0.025 278.68)", "--color-info": "oklch(60.72% 0.227 252.05)", "--color-info-content": "oklch(12.144% 0.045 252.05)", "--color-success": "oklch(85.72% 0.266 158.53)", "--color-success-content": "oklch(17.144% 0.053 158.53)", "--color-warning": "oklch(91.01% 0.212 100.5)", "--color-warning-content": "oklch(18.202% 0.042 100.5)", "--color-error": "oklch(64.84% 0.293 29.349)", "--color-error-content": "oklch(12.968% 0.058 29.349)", "--radius-selector": "1rem", "--radius-field": "1rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, black: { "color-scheme": "dark", "--color-base-100": "oklch(0% 0 0)", "--color-base-200": "oklch(19% 0 0)", "--color-base-300": "oklch(22% 0 0)", "--color-base-content": "oklch(87.609% 0 0)", "--color-primary": "oklch(35% 0 0)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(35% 0 0)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(35% 0 0)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(35% 0 0)", "--color-neutral-content": "oklch(100% 0 0)", "--color-info": "oklch(45.201% 0.313 264.052)", "--color-info-content": "oklch(89.04% 0.062 264.052)", "--color-success": "oklch(51.975% 0.176 142.495)", "--color-success-content": "oklch(90.395% 0.035 142.495)", "--color-warning": "oklch(96.798% 0.211 109.769)", "--color-warning-content": "oklch(19.359% 0.042 109.769)", "--color-error": "oklch(62.795% 0.257 29.233)", "--color-error-content": "oklch(12.559% 0.051 29.233)", "--radius-selector": "0rem", "--radius-field": "0rem", "--radius-box": "0rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, dark: { "color-scheme": "dark", "--color-base-100": "oklch(25.33% 0.016 252.42)", "--color-base-200": "oklch(23.26% 0.014 253.1)", "--color-base-300": "oklch(21.15% 0.012 254.09)", "--color-base-content": 
"oklch(97.807% 0.029 256.847)", "--color-primary": "oklch(58% 0.233 277.117)", "--color-primary-content": "oklch(96% 0.018 272.314)", "--color-secondary": "oklch(65% 0.241 354.308)", "--color-secondary-content": "oklch(94% 0.028 342.258)", "--color-accent": "oklch(77% 0.152 181.912)", "--color-accent-content": "oklch(38% 0.063 188.416)", "--color-neutral": "oklch(14% 0.005 285.823)", "--color-neutral-content": "oklch(92% 0.004 286.32)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(76% 0.177 163.223)", "--color-success-content": "oklch(37% 0.077 168.94)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(71% 0.194 13.428)", "--color-error-content": "oklch(27% 0.105 12.094)", "--radius-selector": "0.5rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, light: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(98% 0 0)", "--color-base-300": "oklch(95% 0 0)", "--color-base-content": "oklch(21% 0.006 285.885)", "--color-primary": "oklch(45% 0.24 277.023)", "--color-primary-content": "oklch(93% 0.034 272.788)", "--color-secondary": "oklch(65% 0.241 354.308)", "--color-secondary-content": "oklch(94% 0.028 342.258)", "--color-accent": "oklch(77% 0.152 181.912)", "--color-accent-content": "oklch(38% 0.063 188.416)", "--color-neutral": "oklch(14% 0.005 285.823)", "--color-neutral-content": "oklch(92% 0.004 286.32)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(76% 0.177 163.223)", "--color-success-content": "oklch(37% 0.077 168.94)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(71% 0.194 13.428)", "--color-error-content": "oklch(27% 0.105 12.094)", "--radius-selector": "0.5rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, luxury: { "color-scheme": "dark", "--color-base-100": "oklch(14.076% 0.004 285.822)", "--color-base-200": "oklch(20.219% 0.004 308.229)", "--color-base-300": "oklch(23.219% 0.004 308.229)", "--color-base-content": "oklch(75.687% 0.123 76.89)", "--color-primary": "oklch(100% 0 0)", "--color-primary-content": "oklch(20% 0 0)", "--color-secondary": "oklch(27.581% 0.064 261.069)", "--color-secondary-content": "oklch(85.516% 0.012 261.069)", "--color-accent": "oklch(36.674% 0.051 338.825)", "--color-accent-content": "oklch(87.334% 0.01 338.825)", "--color-neutral": "oklch(24.27% 0.057 59.825)", "--color-neutral-content": "oklch(93.203% 0.089 90.861)", "--color-info": "oklch(79.061% 0.121 237.133)", "--color-info-content": "oklch(15.812% 0.024 237.133)", "--color-success": "oklch(78.119% 0.192 132.154)", "--color-success-content": "oklch(15.623% 0.038 132.154)", "--color-warning": "oklch(86.127% 0.136 102.891)", "--color-warning-content": "oklch(17.225% 0.027 102.891)", "--color-error": "oklch(71.753% 0.176 22.568)", "--color-error-content": "oklch(14.35% 0.035 22.568)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, dracula: { "color-scheme": "dark", "--color-base-100": 
"oklch(28.822% 0.022 277.508)", "--color-base-200": "oklch(26.805% 0.02 277.508)", "--color-base-300": "oklch(24.787% 0.019 277.508)", "--color-base-content": "oklch(97.747% 0.007 106.545)", "--color-primary": "oklch(75.461% 0.183 346.812)", "--color-primary-content": "oklch(15.092% 0.036 346.812)", "--color-secondary": "oklch(74.202% 0.148 301.883)", "--color-secondary-content": "oklch(14.84% 0.029 301.883)", "--color-accent": "oklch(83.392% 0.124 66.558)", "--color-accent-content": "oklch(16.678% 0.024 66.558)", "--color-neutral": "oklch(39.445% 0.032 275.524)", "--color-neutral-content": "oklch(87.889% 0.006 275.524)", "--color-info": "oklch(88.263% 0.093 212.846)", "--color-info-content": "oklch(17.652% 0.018 212.846)", "--color-success": "oklch(87.099% 0.219 148.024)", "--color-success-content": "oklch(17.419% 0.043 148.024)", "--color-warning": "oklch(95.533% 0.134 112.757)", "--color-warning-content": "oklch(19.106% 0.026 112.757)", "--color-error": "oklch(68.22% 0.206 24.43)", "--color-error-content": "oklch(13.644% 0.041 24.43)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, retro: { "color-scheme": "light", "--color-base-100": "oklch(91.637% 0.034 90.515)", "--color-base-200": "oklch(88.272% 0.049 91.774)", "--color-base-300": "oklch(84.133% 0.065 90.856)", "--color-base-content": "oklch(41% 0.112 45.904)", "--color-primary": "oklch(80% 0.114 19.571)", "--color-primary-content": "oklch(39% 0.141 25.723)", "--color-secondary": "oklch(92% 0.084 155.995)", "--color-secondary-content": "oklch(44% 0.119 151.328)", "--color-accent": "oklch(68% 0.162 75.834)", "--color-accent-content": "oklch(41% 0.112 45.904)", "--color-neutral": "oklch(44% 0.011 73.639)", "--color-neutral-content": "oklch(86% 0.005 56.366)", "--color-info": "oklch(58% 0.158 241.966)", "--color-info-content": "oklch(96% 0.059 95.617)", "--color-success": "oklch(51% 0.096 186.391)", "--color-success-content": "oklch(96% 0.059 95.617)", "--color-warning": "oklch(64% 0.222 41.116)", "--color-warning-content": "oklch(96% 0.059 95.617)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(40% 0.123 38.172)", "--radius-selector": "0.25rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, lofi: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97% 0 0)", "--color-base-300": "oklch(94% 0 0)", "--color-base-content": "oklch(0% 0 0)", "--color-primary": "oklch(15.906% 0 0)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(21.455% 0.001 17.278)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(26.861% 0 0)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(0% 0 0)", "--color-neutral-content": "oklch(100% 0 0)", "--color-info": "oklch(79.54% 0.103 205.9)", "--color-info-content": "oklch(15.908% 0.02 205.9)", "--color-success": "oklch(90.13% 0.153 164.14)", "--color-success-content": "oklch(18.026% 0.03 164.14)", "--color-warning": "oklch(88.37% 0.135 79.94)", "--color-warning-content": "oklch(17.674% 0.027 79.94)", "--color-error": "oklch(78.66% 0.15 28.47)", "--color-error-content": "oklch(15.732% 0.03 28.47)", "--radius-selector": "2rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", 
"--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, valentine: { "color-scheme": "light", "--color-base-100": "oklch(97% 0.014 343.198)", "--color-base-200": "oklch(94% 0.028 342.258)", "--color-base-300": "oklch(89% 0.061 343.231)", "--color-base-content": "oklch(52% 0.223 3.958)", "--color-primary": "oklch(65% 0.241 354.308)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(62% 0.265 303.9)", "--color-secondary-content": "oklch(97% 0.014 308.299)", "--color-accent": "oklch(82% 0.111 230.318)", "--color-accent-content": "oklch(39% 0.09 240.876)", "--color-neutral": "oklch(40% 0.153 2.432)", "--color-neutral-content": "oklch(89% 0.061 343.231)", "--color-info": "oklch(86% 0.127 207.078)", "--color-info-content": "oklch(44% 0.11 240.79)", "--color-success": "oklch(84% 0.143 164.978)", "--color-success-content": "oklch(43% 0.095 166.913)", "--color-warning": "oklch(75% 0.183 55.934)", "--color-warning-content": "oklch(26% 0.079 36.259)", "--color-error": "oklch(63% 0.237 25.331)", "--color-error-content": "oklch(97% 0.013 17.38)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, nord: { "color-scheme": "light", "--color-base-100": "oklch(95.127% 0.007 260.731)", "--color-base-200": "oklch(93.299% 0.01 261.788)", "--color-base-300": "oklch(89.925% 0.016 262.749)", "--color-base-content": "oklch(32.437% 0.022 264.182)", "--color-primary": "oklch(59.435% 0.077 254.027)", "--color-primary-content": "oklch(11.887% 0.015 254.027)", "--color-secondary": "oklch(69.651% 0.059 248.687)", "--color-secondary-content": "oklch(13.93% 0.011 248.687)", "--color-accent": "oklch(77.464% 0.062 217.469)", "--color-accent-content": "oklch(15.492% 0.012 217.469)", "--color-neutral": "oklch(45.229% 0.035 264.131)", "--color-neutral-content": "oklch(89.925% 0.016 262.749)", "--color-info": "oklch(69.207% 0.062 332.664)", "--color-info-content": "oklch(13.841% 0.012 332.664)", "--color-success": "oklch(76.827% 0.074 131.063)", "--color-success-content": "oklch(15.365% 0.014 131.063)", "--color-warning": "oklch(85.486% 0.089 84.093)", "--color-warning-content": "oklch(17.097% 0.017 84.093)", "--color-error": "oklch(60.61% 0.12 15.341)", "--color-error-content": "oklch(12.122% 0.024 15.341)", "--radius-selector": "1rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, lemonade: { "color-scheme": "light", "--color-base-100": "oklch(98.71% 0.02 123.72)", "--color-base-200": "oklch(91.8% 0.018 123.72)", "--color-base-300": "oklch(84.89% 0.017 123.72)", "--color-base-content": "oklch(19.742% 0.004 123.72)", "--color-primary": "oklch(58.92% 0.199 134.6)", "--color-primary-content": "oklch(11.784% 0.039 134.6)", "--color-secondary": "oklch(77.75% 0.196 111.09)", "--color-secondary-content": "oklch(15.55% 0.039 111.09)", "--color-accent": "oklch(85.39% 0.201 100.73)", "--color-accent-content": "oklch(17.078% 0.04 100.73)", "--color-neutral": "oklch(30.98% 0.075 108.6)", "--color-neutral-content": "oklch(86.196% 0.015 108.6)", "--color-info": "oklch(86.19% 0.047 224.14)", "--color-info-content": "oklch(17.238% 0.009 224.14)", "--color-success": "oklch(86.19% 0.047 157.85)", "--color-success-content": "oklch(17.238% 0.009 157.85)", "--color-warning": "oklch(86.19% 0.047 102.15)", "--color-warning-content": "oklch(17.238% 
0.009 102.15)", "--color-error": "oklch(86.19% 0.047 25.85)", "--color-error-content": "oklch(17.238% 0.009 25.85)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, garden: { "color-scheme": "light", "--color-base-100": "oklch(92.951% 0.002 17.197)", "--color-base-200": "oklch(86.445% 0.002 17.197)", "--color-base-300": "oklch(79.938% 0.001 17.197)", "--color-base-content": "oklch(16.961% 0.001 17.32)", "--color-primary": "oklch(62.45% 0.278 3.836)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(48.495% 0.11 355.095)", "--color-secondary-content": "oklch(89.699% 0.022 355.095)", "--color-accent": "oklch(56.273% 0.054 154.39)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(24.155% 0.049 89.07)", "--color-neutral-content": "oklch(92.951% 0.002 17.197)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, aqua: { "color-scheme": "dark", "--color-base-100": "oklch(37% 0.146 265.522)", "--color-base-200": "oklch(28% 0.091 267.935)", "--color-base-300": "oklch(22% 0.091 267.935)", "--color-base-content": "oklch(90% 0.058 230.902)", "--color-primary": "oklch(85.661% 0.144 198.645)", "--color-primary-content": "oklch(40.124% 0.068 197.603)", "--color-secondary": "oklch(60.682% 0.108 309.782)", "--color-secondary-content": "oklch(96% 0.016 293.756)", "--color-accent": "oklch(93.426% 0.102 94.555)", "--color-accent-content": "oklch(18.685% 0.02 94.555)", "--color-neutral": "oklch(27% 0.146 265.522)", "--color-neutral-content": "oklch(80% 0.146 265.522)", "--color-info": "oklch(54.615% 0.215 262.88)", "--color-info-content": "oklch(90.923% 0.043 262.88)", "--color-success": "oklch(62.705% 0.169 149.213)", "--color-success-content": "oklch(12.541% 0.033 149.213)", "--color-warning": "oklch(66.584% 0.157 58.318)", "--color-warning-content": "oklch(27% 0.077 45.635)", "--color-error": "oklch(73.95% 0.19 27.33)", "--color-error-content": "oklch(14.79% 0.038 27.33)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, corporate: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(93% 0 0)", "--color-base-300": "oklch(86% 0 0)", "--color-base-content": "oklch(22.389% 0.031 278.072)", "--color-primary": "oklch(58% 0.158 241.966)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(55% 0.046 257.417)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(60% 0.118 184.704)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(0% 0 0)", "--color-neutral-content": "oklch(100% 0 0)", "--color-info": "oklch(60% 0.126 221.723)", "--color-info-content": "oklch(100% 0 0)", "--color-success": "oklch(62% 0.194 149.214)", "--color-success-content": "oklch(100% 0 0)", "--color-warning": "oklch(85% 0.199 
91.936)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "0.25rem", "--radius-field": "0.25rem", "--radius-box": "0.25rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, pastel: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(98.462% 0.001 247.838)", "--color-base-300": "oklch(92.462% 0.001 247.838)", "--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(90% 0.063 306.703)", "--color-primary-content": "oklch(49% 0.265 301.924)", "--color-secondary": "oklch(89% 0.058 10.001)", "--color-secondary-content": "oklch(51% 0.222 16.935)", "--color-accent": "oklch(90% 0.093 164.15)", "--color-accent-content": "oklch(50% 0.118 165.612)", "--color-neutral": "oklch(55% 0.046 257.417)", "--color-neutral-content": "oklch(92% 0.013 255.508)", "--color-info": "oklch(86% 0.127 207.078)", "--color-info-content": "oklch(52% 0.105 223.128)", "--color-success": "oklch(87% 0.15 154.449)", "--color-success-content": "oklch(52% 0.154 150.069)", "--color-warning": "oklch(83% 0.128 66.29)", "--color-warning-content": "oklch(55% 0.195 38.402)", "--color-error": "oklch(80% 0.114 19.571)", "--color-error-content": "oklch(50% 0.213 27.518)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "0", "--noise": "0" }, bumblebee: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97% 0 0)", "--color-base-300": "oklch(92% 0 0)", "--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(85% 0.199 91.936)", "--color-primary-content": "oklch(42% 0.095 57.708)", "--color-secondary": "oklch(75% 0.183 55.934)", "--color-secondary-content": "oklch(40% 0.123 38.172)", "--color-accent": "oklch(0% 0 0)", "--color-accent-content": "oklch(100% 0 0)", "--color-neutral": "oklch(37% 0.01 67.558)", "--color-neutral-content": "oklch(92% 0.003 48.717)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(39% 0.09 240.876)", "--color-success": "oklch(76% 0.177 163.223)", "--color-success-content": "oklch(37% 0.077 168.94)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(39% 0.141 25.723)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, coffee: { "color-scheme": "dark", "--color-base-100": "oklch(24% 0.023 329.708)", "--color-base-200": "oklch(21% 0.021 329.708)", "--color-base-300": "oklch(16% 0.019 329.708)", "--color-base-content": "oklch(72.354% 0.092 79.129)", "--color-primary": "oklch(71.996% 0.123 62.756)", "--color-primary-content": "oklch(14.399% 0.024 62.756)", "--color-secondary": "oklch(34.465% 0.029 199.194)", "--color-secondary-content": "oklch(86.893% 0.005 199.194)", "--color-accent": "oklch(42.621% 0.074 224.389)", "--color-accent-content": "oklch(88.524% 0.014 224.389)", "--color-neutral": "oklch(16.51% 0.015 326.261)", "--color-neutral-content": "oklch(83.302% 0.003 326.261)", "--color-info": "oklch(79.49% 0.063 184.558)", "--color-info-content": "oklch(15.898% 0.012 184.558)", "--color-success": "oklch(74.722% 0.072 131.116)", 
"--color-success-content": "oklch(14.944% 0.014 131.116)", "--color-warning": "oklch(88.15% 0.14 87.722)", "--color-warning-content": "oklch(17.63% 0.028 87.722)", "--color-error": "oklch(77.318% 0.128 31.871)", "--color-error-content": "oklch(15.463% 0.025 31.871)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, silk: { "color-scheme": "light", "--color-base-100": "oklch(97% 0.0035 67.78)", "--color-base-200": "oklch(95% 0.0081 61.42)", "--color-base-300": "oklch(90% 0.0081 61.42)", "--color-base-content": "oklch(40% 0.0081 61.42)", "--color-primary": "oklch(23.27% 0.0249 284.3)", "--color-primary-content": "oklch(94.22% 0.2505 117.44)", "--color-secondary": "oklch(23.27% 0.0249 284.3)", "--color-secondary-content": "oklch(73.92% 0.2135 50.94)", "--color-accent": "oklch(23.27% 0.0249 284.3)", "--color-accent-content": "oklch(88.92% 0.2061 189.9)", "--color-neutral": "oklch(20% 0 0)", "--color-neutral-content": "oklch(80% 0.0081 61.42)", "--color-info": "oklch(80.39% 0.1148 241.68)", "--color-info-content": "oklch(30.39% 0.1148 241.68)", "--color-success": "oklch(83.92% 0.0901 136.87)", "--color-success-content": "oklch(23.92% 0.0901 136.87)", "--color-warning": "oklch(83.92% 0.1085 80)", "--color-warning-content": "oklch(43.92% 0.1085 80)", "--color-error": "oklch(75.1% 0.1814 22.37)", "--color-error-content": "oklch(35.1% 0.1814 22.37)", "--radius-selector": "2rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "1", "--noise": "0" }, sunset: { "color-scheme": "dark", "--color-base-100": "oklch(22% 0.019 237.69)", "--color-base-200": "oklch(20% 0.019 237.69)", "--color-base-300": "oklch(18% 0.019 237.69)", "--color-base-content": "oklch(77.383% 0.043 245.096)", "--color-primary": "oklch(74.703% 0.158 39.947)", "--color-primary-content": "oklch(14.94% 0.031 39.947)", "--color-secondary": "oklch(72.537% 0.177 2.72)", "--color-secondary-content": "oklch(14.507% 0.035 2.72)", "--color-accent": "oklch(71.294% 0.166 299.844)", "--color-accent-content": "oklch(14.258% 0.033 299.844)", "--color-neutral": "oklch(26% 0.019 237.69)", "--color-neutral-content": "oklch(70% 0.019 237.69)", "--color-info": "oklch(85.559% 0.085 206.015)", "--color-info-content": "oklch(17.111% 0.017 206.015)", "--color-success": "oklch(85.56% 0.085 144.778)", "--color-success-content": "oklch(17.112% 0.017 144.778)", "--color-warning": "oklch(85.569% 0.084 74.427)", "--color-warning-content": "oklch(17.113% 0.016 74.427)", "--color-error": "oklch(85.511% 0.078 16.886)", "--color-error-content": "oklch(17.102% 0.015 16.886)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, synthwave: { "color-scheme": "dark", "--color-base-100": "oklch(15% 0.09 281.288)", "--color-base-200": "oklch(20% 0.09 281.288)", "--color-base-300": "oklch(25% 0.09 281.288)", "--color-base-content": "oklch(78% 0.115 274.713)", "--color-primary": "oklch(71% 0.202 349.761)", "--color-primary-content": "oklch(28% 0.109 3.907)", "--color-secondary": "oklch(82% 0.111 230.318)", "--color-secondary-content": "oklch(29% 0.066 243.157)", "--color-accent": "oklch(75% 0.183 55.934)", "--color-accent-content": "oklch(26% 0.079 36.259)", "--color-neutral": "oklch(45% 0.24 277.023)", 
"--color-neutral-content": "oklch(87% 0.065 274.039)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(77% 0.152 181.912)", "--color-success-content": "oklch(27% 0.046 192.524)", "--color-warning": "oklch(90% 0.182 98.111)", "--color-warning-content": "oklch(42% 0.095 57.708)", "--color-error": "oklch(73.7% 0.121 32.639)", "--color-error-content": "oklch(23.501% 0.096 290.329)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, dim: { "color-scheme": "dark", "--color-base-100": "oklch(30.857% 0.023 264.149)", "--color-base-200": "oklch(28.036% 0.019 264.182)", "--color-base-300": "oklch(26.346% 0.018 262.177)", "--color-base-content": "oklch(82.901% 0.031 222.959)", "--color-primary": "oklch(86.133% 0.141 139.549)", "--color-primary-content": "oklch(17.226% 0.028 139.549)", "--color-secondary": "oklch(73.375% 0.165 35.353)", "--color-secondary-content": "oklch(14.675% 0.033 35.353)", "--color-accent": "oklch(74.229% 0.133 311.379)", "--color-accent-content": "oklch(14.845% 0.026 311.379)", "--color-neutral": "oklch(24.731% 0.02 264.094)", "--color-neutral-content": "oklch(82.901% 0.031 222.959)", "--color-info": "oklch(86.078% 0.142 206.182)", "--color-info-content": "oklch(17.215% 0.028 206.182)", "--color-success": "oklch(86.171% 0.142 166.534)", "--color-success-content": "oklch(17.234% 0.028 166.534)", "--color-warning": "oklch(86.163% 0.142 94.818)", "--color-warning-content": "oklch(17.232% 0.028 94.818)", "--color-error": "oklch(82.418% 0.099 33.756)", "--color-error-content": "oklch(16.483% 0.019 33.756)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, abyss: { "color-scheme": "dark", "--color-base-100": "oklch(20% 0.08 209)", "--color-base-200": "oklch(15% 0.08 209)", "--color-base-300": "oklch(10% 0.08 209)", "--color-base-content": "oklch(90% 0.076 70.697)", "--color-primary": "oklch(92% 0.2653 125)", "--color-primary-content": "oklch(50% 0.2653 125)", "--color-secondary": "oklch(83.27% 0.0764 298.3)", "--color-secondary-content": "oklch(43.27% 0.0764 298.3)", "--color-accent": "oklch(43% 0 0)", "--color-accent-content": "oklch(98% 0 0)", "--color-neutral": "oklch(30% 0.08 209)", "--color-neutral-content": "oklch(90% 0.076 70.697)", "--color-info": "oklch(74% 0.16 232.661)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(79% 0.209 151.711)", "--color-success-content": "oklch(26% 0.065 152.934)", "--color-warning": "oklch(84.8% 0.1962 84.62)", "--color-warning-content": "oklch(44.8% 0.1962 84.62)", "--color-error": "oklch(65% 0.1985 24.22)", "--color-error-content": "oklch(27% 0.1985 24.22)", "--radius-selector": "2rem", "--radius-field": "0.25rem", "--radius-box": "0.5rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, forest: { "color-scheme": "dark", "--color-base-100": "oklch(20.84% 0.008 17.911)", "--color-base-200": "oklch(18.522% 0.007 17.911)", "--color-base-300": "oklch(16.203% 0.007 17.911)", "--color-base-content": "oklch(83.768% 0.001 17.911)", "--color-primary": "oklch(68.628% 0.185 148.958)", "--color-primary-content": "oklch(0% 0 0)", "--color-secondary": "oklch(69.776% 0.135 168.327)", "--color-secondary-content": 
"oklch(13.955% 0.027 168.327)", "--color-accent": "oklch(70.628% 0.119 185.713)", "--color-accent-content": "oklch(14.125% 0.023 185.713)", "--color-neutral": "oklch(30.698% 0.039 171.364)", "--color-neutral-content": "oklch(86.139% 0.007 171.364)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, night: { "color-scheme": "dark", "--color-base-100": "oklch(20.768% 0.039 265.754)", "--color-base-200": "oklch(19.314% 0.037 265.754)", "--color-base-300": "oklch(17.86% 0.034 265.754)", "--color-base-content": "oklch(84.153% 0.007 265.754)", "--color-primary": "oklch(75.351% 0.138 232.661)", "--color-primary-content": "oklch(15.07% 0.027 232.661)", "--color-secondary": "oklch(68.011% 0.158 276.934)", "--color-secondary-content": "oklch(13.602% 0.031 276.934)", "--color-accent": "oklch(72.36% 0.176 350.048)", "--color-accent-content": "oklch(14.472% 0.035 350.048)", "--color-neutral": "oklch(27.949% 0.036 260.03)", "--color-neutral-content": "oklch(85.589% 0.007 260.03)", "--color-info": "oklch(68.455% 0.148 237.251)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(78.452% 0.132 181.911)", "--color-success-content": "oklch(15.69% 0.026 181.911)", "--color-warning": "oklch(83.242% 0.139 82.95)", "--color-warning-content": "oklch(16.648% 0.027 82.95)", "--color-error": "oklch(71.785% 0.17 13.118)", "--color-error-content": "oklch(14.357% 0.034 13.118)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, caramellatte: { "color-scheme": "light", "--color-base-100": "oklch(98% 0.016 73.684)", "--color-base-200": "oklch(95% 0.038 75.164)", "--color-base-300": "oklch(90% 0.076 70.697)", "--color-base-content": "oklch(40% 0.123 38.172)", "--color-primary": "oklch(0% 0 0)", "--color-primary-content": "oklch(100% 0 0)", "--color-secondary": "oklch(22.45% 0.075 37.85)", "--color-secondary-content": "oklch(90% 0.076 70.697)", "--color-accent": "oklch(46.44% 0.111 37.85)", "--color-accent-content": "oklch(90% 0.076 70.697)", "--color-neutral": "oklch(55% 0.195 38.402)", "--color-neutral-content": "oklch(98% 0.016 73.684)", "--color-info": "oklch(42% 0.199 265.638)", "--color-info-content": "oklch(90% 0.076 70.697)", "--color-success": "oklch(43% 0.095 166.913)", "--color-success-content": "oklch(90% 0.076 70.697)", "--color-warning": "oklch(82% 0.189 84.429)", "--color-warning-content": "oklch(41% 0.112 45.904)", "--color-error": "oklch(70% 0.191 22.216)", "--color-error-content": "oklch(39% 0.141 25.723)", "--radius-selector": "2rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "1", "--noise": "1" }, autumn: { "color-scheme": "light", "--color-base-100": "oklch(95.814% 0 0)", "--color-base-200": "oklch(89.107% 0 0)", "--color-base-300": "oklch(82.4% 0 0)", "--color-base-content": "oklch(19.162% 0 0)", "--color-primary": "oklch(40.723% 0.161 17.53)", 
"--color-primary-content": "oklch(88.144% 0.032 17.53)", "--color-secondary": "oklch(61.676% 0.169 23.865)", "--color-secondary-content": "oklch(12.335% 0.033 23.865)", "--color-accent": "oklch(73.425% 0.094 60.729)", "--color-accent-content": "oklch(14.685% 0.018 60.729)", "--color-neutral": "oklch(54.367% 0.037 51.902)", "--color-neutral-content": "oklch(90.873% 0.007 51.902)", "--color-info": "oklch(69.224% 0.097 207.284)", "--color-info-content": "oklch(13.844% 0.019 207.284)", "--color-success": "oklch(60.995% 0.08 174.616)", "--color-success-content": "oklch(12.199% 0.016 174.616)", "--color-warning": "oklch(70.081% 0.164 56.844)", "--color-warning-content": "oklch(14.016% 0.032 56.844)", "--color-error": "oklch(53.07% 0.241 24.16)", "--color-error-content": "oklch(90.614% 0.048 24.16)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, emerald: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(93% 0 0)", "--color-base-300": "oklch(86% 0 0)", "--color-base-content": "oklch(35.519% 0.032 262.988)", "--color-primary": "oklch(76.662% 0.135 153.45)", "--color-primary-content": "oklch(33.387% 0.04 162.24)", "--color-secondary": "oklch(61.302% 0.202 261.294)", "--color-secondary-content": "oklch(100% 0 0)", "--color-accent": "oklch(72.772% 0.149 33.2)", "--color-accent-content": "oklch(0% 0 0)", "--color-neutral": "oklch(35.519% 0.032 262.988)", "--color-neutral-content": "oklch(98.462% 0.001 247.838)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, cupcake: { "color-scheme": "light", "--color-base-100": "oklch(97.788% 0.004 56.375)", "--color-base-200": "oklch(93.982% 0.007 61.449)", "--color-base-300": "oklch(91.586% 0.006 53.44)", "--color-base-content": "oklch(23.574% 0.066 313.189)", "--color-primary": "oklch(85% 0.138 181.071)", "--color-primary-content": "oklch(43% 0.078 188.216)", "--color-secondary": "oklch(89% 0.061 343.231)", "--color-secondary-content": "oklch(45% 0.187 3.815)", "--color-accent": "oklch(90% 0.076 70.697)", "--color-accent-content": "oklch(47% 0.157 37.304)", "--color-neutral": "oklch(27% 0.006 286.033)", "--color-neutral-content": "oklch(92% 0.004 286.32)", "--color-info": "oklch(68% 0.169 237.323)", "--color-info-content": "oklch(29% 0.066 243.157)", "--color-success": "oklch(69% 0.17 162.48)", "--color-success-content": "oklch(26% 0.051 172.552)", "--color-warning": "oklch(79% 0.184 86.047)", "--color-warning-content": "oklch(28% 0.066 53.813)", "--color-error": "oklch(64% 0.246 16.439)", "--color-error-content": "oklch(27% 0.105 12.094)", "--radius-selector": "1rem", "--radius-field": "2rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "2px", "--depth": "1", "--noise": "0" }, cmyk: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(95% 0 0)", "--color-base-300": "oklch(90% 0 0)", 
"--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(71.772% 0.133 239.443)", "--color-primary-content": "oklch(14.354% 0.026 239.443)", "--color-secondary": "oklch(64.476% 0.202 359.339)", "--color-secondary-content": "oklch(12.895% 0.04 359.339)", "--color-accent": "oklch(94.228% 0.189 105.306)", "--color-accent-content": "oklch(18.845% 0.037 105.306)", "--color-neutral": "oklch(21.778% 0 0)", "--color-neutral-content": "oklch(84.355% 0 0)", "--color-info": "oklch(68.475% 0.094 217.284)", "--color-info-content": "oklch(13.695% 0.018 217.284)", "--color-success": "oklch(46.949% 0.162 321.406)", "--color-success-content": "oklch(89.389% 0.032 321.406)", "--color-warning": "oklch(71.236% 0.159 52.023)", "--color-warning-content": "oklch(14.247% 0.031 52.023)", "--color-error": "oklch(62.013% 0.208 28.717)", "--color-error-content": "oklch(12.402% 0.041 28.717)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, business: { "color-scheme": "dark", "--color-base-100": "oklch(24.353% 0 0)", "--color-base-200": "oklch(22.648% 0 0)", "--color-base-300": "oklch(20.944% 0 0)", "--color-base-content": "oklch(84.87% 0 0)", "--color-primary": "oklch(41.703% 0.099 251.473)", "--color-primary-content": "oklch(88.34% 0.019 251.473)", "--color-secondary": "oklch(64.092% 0.027 229.389)", "--color-secondary-content": "oklch(12.818% 0.005 229.389)", "--color-accent": "oklch(67.271% 0.167 35.791)", "--color-accent-content": "oklch(13.454% 0.033 35.791)", "--color-neutral": "oklch(27.441% 0.013 253.041)", "--color-neutral-content": "oklch(85.488% 0.002 253.041)", "--color-info": "oklch(62.616% 0.143 240.033)", "--color-info-content": "oklch(12.523% 0.028 240.033)", "--color-success": "oklch(70.226% 0.094 156.596)", "--color-success-content": "oklch(14.045% 0.018 156.596)", "--color-warning": "oklch(77.482% 0.115 81.519)", "--color-warning-content": "oklch(15.496% 0.023 81.519)", "--color-error": "oklch(51.61% 0.146 29.674)", "--color-error-content": "oklch(90.322% 0.029 29.674)", "--radius-selector": "0rem", "--radius-field": "0.25rem", "--radius-box": "0.25rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, winter: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97.466% 0.011 259.822)", "--color-base-300": "oklch(93.268% 0.016 262.751)", "--color-base-content": "oklch(41.886% 0.053 255.824)", "--color-primary": "oklch(56.86% 0.255 257.57)", "--color-primary-content": "oklch(91.372% 0.051 257.57)", "--color-secondary": "oklch(42.551% 0.161 282.339)", "--color-secondary-content": "oklch(88.51% 0.032 282.339)", "--color-accent": "oklch(59.939% 0.191 335.171)", "--color-accent-content": "oklch(11.988% 0.038 335.171)", "--color-neutral": "oklch(19.616% 0.063 257.651)", "--color-neutral-content": "oklch(83.923% 0.012 257.651)", "--color-info": "oklch(88.127% 0.085 214.515)", "--color-info-content": "oklch(17.625% 0.017 214.515)", "--color-success": "oklch(80.494% 0.077 197.823)", "--color-success-content": "oklch(16.098% 0.015 197.823)", "--color-warning": "oklch(89.172% 0.045 71.47)", "--color-warning-content": "oklch(17.834% 0.009 71.47)", "--color-error": "oklch(73.092% 0.11 20.076)", "--color-error-content": "oklch(14.618% 0.022 20.076)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", 
"--size-field": "0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" }, halloween: { "color-scheme": "dark", "--color-base-100": "oklch(21% 0.006 56.043)", "--color-base-200": "oklch(14% 0.004 49.25)", "--color-base-300": "oklch(0% 0 0)", "--color-base-content": "oklch(84.955% 0 0)", "--color-primary": "oklch(77.48% 0.204 60.62)", "--color-primary-content": "oklch(19.693% 0.004 196.779)", "--color-secondary": "oklch(45.98% 0.248 305.03)", "--color-secondary-content": "oklch(89.196% 0.049 305.03)", "--color-accent": "oklch(64.8% 0.223 136.073)", "--color-accent-content": "oklch(0% 0 0)", "--color-neutral": "oklch(24.371% 0.046 65.681)", "--color-neutral-content": "oklch(84.874% 0.009 65.681)", "--color-info": "oklch(54.615% 0.215 262.88)", "--color-info-content": "oklch(90.923% 0.043 262.88)", "--color-success": "oklch(62.705% 0.169 149.213)", "--color-success-content": "oklch(12.541% 0.033 149.213)", "--color-warning": "oklch(66.584% 0.157 58.318)", "--color-warning-content": "oklch(13.316% 0.031 58.318)", "--color-error": "oklch(65.72% 0.199 27.33)", "--color-error-content": "oklch(13.144% 0.039 27.33)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, fantasy: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(93% 0 0)", "--color-base-300": "oklch(86% 0 0)", "--color-base-content": "oklch(27.807% 0.029 256.847)", "--color-primary": "oklch(37.45% 0.189 325.02)", "--color-primary-content": "oklch(87.49% 0.037 325.02)", "--color-secondary": "oklch(53.92% 0.162 241.36)", "--color-secondary-content": "oklch(90.784% 0.032 241.36)", "--color-accent": "oklch(75.98% 0.204 56.72)", "--color-accent-content": "oklch(15.196% 0.04 56.72)", "--color-neutral": "oklch(27.807% 0.029 256.847)", "--color-neutral-content": "oklch(85.561% 0.005 256.847)", "--color-info": "oklch(72.06% 0.191 231.6)", "--color-info-content": "oklch(0% 0 0)", "--color-success": "oklch(64.8% 0.15 160)", "--color-success-content": "oklch(0% 0 0)", "--color-warning": "oklch(84.71% 0.199 83.87)", "--color-warning-content": "oklch(0% 0 0)", "--color-error": "oklch(71.76% 0.221 22.18)", "--color-error-content": "oklch(0% 0 0)", "--radius-selector": "1rem", "--radius-field": "0.5rem", "--radius-box": "1rem", "--size-selector": "0.25rem", "--size-field": "0.25rem", "--border": "1px", "--depth": "1", "--noise": "0" }, wireframe: { "color-scheme": "light", "--color-base-100": "oklch(100% 0 0)", "--color-base-200": "oklch(97% 0 0)", "--color-base-300": "oklch(94% 0 0)", "--color-base-content": "oklch(20% 0 0)", "--color-primary": "oklch(87% 0 0)", "--color-primary-content": "oklch(26% 0 0)", "--color-secondary": "oklch(87% 0 0)", "--color-secondary-content": "oklch(26% 0 0)", "--color-accent": "oklch(87% 0 0)", "--color-accent-content": "oklch(26% 0 0)", "--color-neutral": "oklch(87% 0 0)", "--color-neutral-content": "oklch(26% 0 0)", "--color-info": "oklch(44% 0.11 240.79)", "--color-info-content": "oklch(90% 0.058 230.902)", "--color-success": "oklch(43% 0.095 166.913)", "--color-success-content": "oklch(90% 0.093 164.15)", "--color-warning": "oklch(47% 0.137 46.201)", "--color-warning-content": "oklch(92% 0.12 95.746)", "--color-error": "oklch(44% 0.177 26.899)", "--color-error-content": "oklch(88% 0.062 18.334)", "--radius-selector": "0rem", "--radius-field": "0.25rem", "--radius-box": "0.25rem", "--size-selector": "0.25rem", "--size-field": 
"0.25rem", "--border": "1px", "--depth": "0", "--noise": "0" } }; + +// packages/daisyui/base/rootscrolllock/object.js +var object_default2 = { ':root:has( .modal-open, .modal[open], .modal:target, .modal-toggle:checked, .drawer:not([class*="drawer-open"]) > .drawer-toggle:checked )': { overflow: "hidden" } }; + +// packages/daisyui/functions/addPrefix.js +var defaultExcludedPrefixes = ["color-", "size-", "radius-", "border", "depth", "noise"]; +var shouldExcludeVariable = (variableName, excludedPrefixes) => { + if (variableName.startsWith("tw")) { + return true; + } + return excludedPrefixes.some((excludedPrefix) => variableName.startsWith(excludedPrefix)); +}; +var prefixVariable = (variableName, prefix, excludedPrefixes) => { + if (shouldExcludeVariable(variableName, excludedPrefixes)) { + return variableName; + } + return `${prefix}${variableName}`; +}; +var getPrefixedSelector = (selector, prefix) => { + if (!selector.startsWith(".")) + return selector; + return `.${prefix}${selector.slice(1)}`; +}; +var getPrefixedKey = (key, prefix, excludedPrefixes) => { + const prefixAmpDot = prefix ? `&.${prefix}` : ""; + if (!prefix) + return key; + if (key.startsWith("--")) { + const variableName = key.slice(2); + return `--${prefixVariable(variableName, prefix, excludedPrefixes)}`; + } + if (key.startsWith("@") || key.startsWith("[")) { + return key; + } + if (key.startsWith("&")) { + if (key.match(/:[a-z-]+\(/)) { + return key.replace(/\.([\w-]+)/g, `.${prefix}$1`); + } + if (key.startsWith("&.")) { + return `${prefixAmpDot}${key.slice(2)}`; + } + return key.replace(/\.([\w-]+)/g, `.${prefix}$1`); + } + if (key.startsWith(":")) { + return key.replace(/\.([\w-]+)/g, `.${prefix}$1`); + } + if (key.includes(".") && !key.includes(" ") && !key.includes(">") && !key.includes("+") && !key.includes("~")) { + return key.split(".").filter(Boolean).map((part) => prefix + part).join(".").replace(/^/, "."); + } + if (key.includes(">") || key.includes("+") || key.includes("~")) { + if (key.includes(",")) { + return key.split(/\s*,\s*/).map((part) => { + return part.replace(/\.([\w-]+)/g, `.${prefix}$1`); + }).join(", "); + } + let processedKey = key.replace(/\.([\w-]+)/g, `.${prefix}$1`); + if (processedKey.startsWith(">") || processedKey.startsWith("+") || processedKey.startsWith("~")) { + processedKey = ` ${processedKey}`; + } + return processedKey; + } + if (key.includes(" ")) { + return key.split(/\s+/).map((part) => { + if (part.startsWith(".")) { + return getPrefixedSelector(part, prefix); + } + return part; + }).join(" "); + } + if (key.includes(":")) { + const [selector, ...pseudo] = key.split(":"); + if (selector.startsWith(".")) { + return `${getPrefixedSelector(selector, prefix)}:${pseudo.join(":")}`; + } + return key.replace(/\.([\w-]+)/g, `.${prefix}$1`); + } + if (key.startsWith(".")) { + return getPrefixedSelector(key, prefix); + } + return key; +}; +var processArrayValue = (value, prefix, excludedPrefixes) => { + return value.map((item) => { + if (typeof item === "string") { + if (item.startsWith(".")) { + return prefix ? 
`.${prefix}${item.slice(1)}` : item; + } + return processStringValue(item, prefix, excludedPrefixes); + } + return item; + }); +}; +var processStringValue = (value, prefix, excludedPrefixes) => { + if (prefix === 0) + return value; + return value.replace(/var\(--([^)]+)\)/g, (match, variableName) => { + if (shouldExcludeVariable(variableName, excludedPrefixes)) { + return match; + } + return `var(--${prefix}${variableName})`; + }); +}; +var processValue = (value, prefix, excludedPrefixes) => { + if (Array.isArray(value)) { + return processArrayValue(value, prefix, excludedPrefixes); + } else if (typeof value === "object" && value !== null) { + return addPrefix(value, prefix, excludedPrefixes); + } else if (typeof value === "string") { + return processStringValue(value, prefix, excludedPrefixes); + } else { + return value; + } +}; +var addPrefix = (obj, prefix, excludedPrefixes = defaultExcludedPrefixes) => { + return Object.entries(obj).reduce((result, [key, value]) => { + const newKey = getPrefixedKey(key, prefix, excludedPrefixes); + result[newKey] = processValue(value, prefix, excludedPrefixes); + return result; + }, {}); +}; + +// packages/daisyui/base/rootscrolllock/index.js +var rootscrolllock_default = ({ addBase, prefix = "" }) => { + const prefixedrootscrolllock = addPrefix(object_default2, prefix); + addBase({ ...prefixedrootscrolllock }); +}; + +// packages/daisyui/base/rootcolor/object.js +var object_default3 = { ":root, [data-theme]": { "background-color": "var(--root-bg, var(--color-base-100))", color: "var(--color-base-content)" } }; + +// packages/daisyui/base/rootcolor/index.js +var rootcolor_default = ({ addBase, prefix = "" }) => { + const prefixedrootcolor = addPrefix(object_default3, prefix); + addBase({ ...prefixedrootcolor }); +}; + +// packages/daisyui/base/scrollbar/object.js +var object_default4 = { ":root": { "scrollbar-color": "color-mix(in oklch, currentColor 35%, #0000) #0000" } }; + +// packages/daisyui/base/scrollbar/index.js +var scrollbar_default = ({ addBase, prefix = "" }) => { + const prefixedscrollbar = addPrefix(object_default4, prefix); + addBase({ ...prefixedscrollbar }); +}; + +// packages/daisyui/base/properties/object.js +var object_default5 = { "@property --radialprogress": { syntax: '"<percentage>"', inherits: "true", "initial-value": "0%" } }; + +// packages/daisyui/base/properties/index.js +var properties_default = ({ addBase, prefix = "" }) => { + const prefixedproperties = addPrefix(object_default5, prefix); + addBase({ ...prefixedproperties }); +}; + +// packages/daisyui/base/rootscrollgutter/object.js +var object_default6 = { ":where( :root:has( .modal-open, .modal[open], .modal:target, .modal-toggle:checked, .drawer:not(.drawer-open) > .drawer-toggle:checked ) )": { "scrollbar-gutter": "stable", "background-image": "linear-gradient(var(--color-base-100), var(--color-base-100))", "--root-bg": "color-mix(in srgb, var(--color-base-100), oklch(0% 0 0) 40%)" }, ":where(.modal[open], .modal-open, .modal-toggle:checked + .modal):not(.modal-start, .modal-end)": { "scrollbar-gutter": "stable" } }; + +// packages/daisyui/base/rootscrollgutter/index.js +var rootscrollgutter_default = ({ addBase, prefix = "" }) => { + const prefixedrootscrollgutter = addPrefix(object_default6, prefix); + addBase({ ...prefixedrootscrollgutter }); +}; + +// packages/daisyui/base/svg/object.js +var object_default7 = { ":root": { "--fx-noise": `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='a'%3E%3CfeTurbulence type='fractalNoise' 
baseFrequency='1.34' numOctaves='4' stitchTiles='stitch'%3E%3C/feTurbulence%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23a)' opacity='0.2'%3E%3C/rect%3E%3C/svg%3E")` }, ".chat": { "--mask-chat": `url("data:image/svg+xml,%3csvg width='13' height='13' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M0 11.5004C0 13.0004 2 13.0004 2 13.0004H12H13V0.00036329L12.5 0C12.5 0 11.977 2.09572 11.8581 2.50033C11.6075 3.35237 10.9149 4.22374 9 5.50036C6 7.50036 0 10.0004 0 11.5004Z'/%3e%3c/svg%3e")` } }; + +// packages/daisyui/base/svg/index.js +var svg_default = ({ addBase, prefix = "" }) => { + const prefixedsvg = addPrefix(object_default7, prefix); + addBase({ ...prefixedsvg }); +}; + +// packages/daisyui/components/drawer/object.js +var object_default8 = { ".drawer": { position: "relative", display: "grid", width: "100%", "grid-auto-columns": "max-content auto" }, ".drawer-content": { "grid-column-start": "2", "grid-row-start": "1", "min-width": "calc(0.25rem * 0)" }, ".drawer-side": { "pointer-events": "none", visibility: "hidden", position: "fixed", "inset-inline-start": "calc(0.25rem * 0)", top: "calc(0.25rem * 0)", "z-index": 1, "grid-column-start": "1", "grid-row-start": "1", display: "grid", width: "100%", "grid-template-columns": "repeat(1, minmax(0, 1fr))", "grid-template-rows": "repeat(1, minmax(0, 1fr))", "align-items": "flex-start", "justify-items": "start", "overflow-x": "hidden", "overflow-y": "hidden", "overscroll-behavior": "contain", opacity: "0%", transition: "opacity 0.2s ease-out 0.1s allow-discrete, visibility 0.3s ease-out 0.1s allow-discrete", height: ["100vh", "100dvh"], "> .drawer-overlay": { position: "sticky", top: "calc(0.25rem * 0)", cursor: "pointer", "place-self": "stretch", "background-color": "oklch(0% 0 0 / 40%)" }, "> *": { "grid-column-start": "1", "grid-row-start": "1" }, "> *:not(.drawer-overlay)": { "will-change": "transform", transition: "translate 0.3s ease-out", translate: "-100%", '[dir="rtl"] &': { translate: "100%" } } }, ".drawer-toggle": { position: "fixed", height: "calc(0.25rem * 0)", width: "calc(0.25rem * 0)", appearance: "none", opacity: "0%", "&:checked": { "& ~ .drawer-side": { "pointer-events": "auto", visibility: "visible", "overflow-y": "auto", opacity: "100%", "& > *:not(.drawer-overlay)": { translate: "0%" } } }, "&:focus-visible ~ .drawer-content label.drawer-button": { outline: "2px solid", "outline-offset": "2px" } }, ".drawer-end": { "grid-auto-columns": "auto max-content", "> .drawer-toggle": { "& ~ .drawer-content": { "grid-column-start": "1" }, "& ~ .drawer-side": { "grid-column-start": "2", "justify-items": "end" }, "& ~ .drawer-side > *:not(.drawer-overlay)": { translate: "100%", '[dir="rtl"] &': { translate: "-100%" } }, "&:checked ~ .drawer-side > *:not(.drawer-overlay)": { translate: "0%" } } }, ".drawer-open": { "> .drawer-side": { "overflow-y": "auto" }, "> .drawer-toggle": { display: "none", "& ~ .drawer-side": { "pointer-events": "auto", visibility: "visible", position: "sticky", display: "block", width: "auto", "overscroll-behavior": "auto", opacity: "100%", "& > .drawer-overlay": { cursor: "default", "background-color": "transparent" }, "& > *:not(.drawer-overlay)": { translate: "0%", '[dir="rtl"] &': { translate: "0%" } } }, "&:checked ~ .drawer-side": { "pointer-events": "auto", visibility: "visible" } } } }; + +// packages/daisyui/components/drawer/index.js +var drawer_default = ({ addComponents, prefix = "" }) => { + const prefixeddrawer = addPrefix(object_default8, prefix); + 
addComponents({ ...prefixeddrawer }); +}; + +// packages/daisyui/components/link/object.js +var object_default9 = { ".link": { cursor: "pointer", "text-decoration-line": "underline", "&:focus": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } }, "&:focus-visible": { outline: "2px solid currentColor", "outline-offset": "2px" } }, ".link-hover": { "text-decoration-line": "none", "&:hover": { "@media (hover: hover)": { "text-decoration-line": "underline" } } }, ".link-primary": { color: "var(--color-primary)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-primary) 80%, #000)" } } }, ".link-secondary": { color: "var(--color-secondary)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-secondary) 80%, #000)" } } }, ".link-accent": { color: "var(--color-accent)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-accent) 80%, #000)" } } }, ".link-neutral": { color: "var(--color-neutral)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-neutral) 80%, #000)" } } }, ".link-success": { color: "var(--color-success)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-success) 80%, #000)" } } }, ".link-info": { color: "var(--color-info)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-info) 80%, #000)" } } }, ".link-warning": { color: "var(--color-warning)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-warning) 80%, #000)" } } }, ".link-error": { color: "var(--color-error)", "@media (hover: hover)": { "&:hover": { color: "color-mix(in oklab, var(--color-error) 80%, #000)" } } } }; + +// packages/daisyui/components/link/index.js +var link_default = ({ addComponents, prefix = "" }) => { + const prefixedlink = addPrefix(object_default9, prefix); + addComponents({ ...prefixedlink }); +}; + +// packages/daisyui/components/stat/object.js +var object_default10 = { ".stats": { position: "relative", display: "inline-grid", "grid-auto-flow": "column", "overflow-x": "auto", "border-radius": "var(--radius-box)" }, ".stat": { display: "inline-grid", width: "100%", "column-gap": "calc(0.25rem * 4)", "padding-inline": "calc(0.25rem * 6)", "padding-block": "calc(0.25rem * 4)", "grid-template-columns": "repeat(1, 1fr)", "&:not(:last-child)": { "border-inline-end": "var(--border) dashed color-mix(in oklab, currentColor 10%, #0000)", "border-block-end": "none" } }, ".stat-figure": { "grid-column-start": "2", "grid-row": "span 3 / span 3", "grid-row-start": "1", "place-self": "center", "justify-self": "flex-end" }, ".stat-title": { "grid-column-start": "1", "white-space": "nowrap", color: "color-mix(in oklab, var(--color-base-content) 60%, transparent)", "font-size": "0.75rem" }, ".stat-value": { "grid-column-start": "1", "white-space": "nowrap", "font-size": "2rem", "font-weight": 800 }, ".stat-desc": { "grid-column-start": "1", "white-space": "nowrap", color: "color-mix(in oklab, var(--color-base-content) 60%, transparent)", "font-size": "0.75rem" }, ".stat-actions": { "grid-column-start": "1", "white-space": "nowrap" }, ".stats-horizontal": { "grid-auto-flow": "column", "overflow-x": "auto", ".stat:not(:last-child)": { "border-inline-end": "var(--border) dashed color-mix(in oklab, currentColor 10%, #0000)", "border-block-end": "none" } }, ".stats-vertical": { "grid-auto-flow": "row", "overflow-y": 
"auto", ".stat:not(:last-child)": { "border-inline-end": "none", "border-block-end": "var(--border) dashed color-mix(in oklab, currentColor 10%, #0000)" } } }; + +// packages/daisyui/components/stat/index.js +var stat_default = ({ addComponents, prefix = "" }) => { + const prefixedstat = addPrefix(object_default10, prefix); + addComponents({ ...prefixedstat }); +}; + +// packages/daisyui/components/carousel/object.js +var object_default11 = { ".carousel": { display: "inline-flex", "overflow-x": "scroll", "scroll-snap-type": "x mandatory", "scroll-behavior": "smooth", "scrollbar-width": "none", "&::-webkit-scrollbar": { display: "none" } }, ".carousel-vertical": { "flex-direction": "column", "overflow-y": "scroll", "scroll-snap-type": "y mandatory" }, ".carousel-horizontal": { "flex-direction": "row", "overflow-x": "scroll", "scroll-snap-type": "x mandatory" }, ".carousel-item": { "box-sizing": "content-box", display: "flex", flex: "none", "scroll-snap-align": "start" }, ".carousel-start": { ".carousel-item": { "scroll-snap-align": "start" } }, ".carousel-center": { ".carousel-item": { "scroll-snap-align": "center" } }, ".carousel-end": { ".carousel-item": { "scroll-snap-align": "end" } } }; + +// packages/daisyui/components/carousel/index.js +var carousel_default = ({ addComponents, prefix = "" }) => { + const prefixedcarousel = addPrefix(object_default11, prefix); + addComponents({ ...prefixedcarousel }); +}; + +// packages/daisyui/components/divider/object.js +var object_default12 = { ".divider": { display: "flex", height: "calc(0.25rem * 4)", "flex-direction": "row", "align-items": "center", "align-self": "stretch", "white-space": "nowrap", margin: "var(--divider-m, 1rem 0)", "--divider-color": "color-mix(in oklab, var(--color-base-content) 10%, transparent)", "&:before, &:after": { content: '""', height: "calc(0.25rem * 0.5)", width: "100%", "flex-grow": 1, "background-color": "var(--divider-color)" }, "@media print": { "&:before, &:after": { border: "0.5px solid" } }, "&:not(:empty)": { gap: "calc(0.25rem * 4)" } }, ".divider-horizontal": { "--divider-m": "0 1rem", "&.divider": { height: "auto", width: "calc(0.25rem * 4)", "flex-direction": "column", "&:before": { height: "100%", width: "calc(0.25rem * 0.5)" }, "&:after": { height: "100%", width: "calc(0.25rem * 0.5)" } } }, ".divider-vertical": { "--divider-m": "1rem 0", "&.divider": { height: "calc(0.25rem * 4)", width: "auto", "flex-direction": "row", "&:before": { height: "calc(0.25rem * 0.5)", width: "100%" }, "&:after": { height: "calc(0.25rem * 0.5)", width: "100%" } } }, ".divider-neutral": { "&:before, &:after": { "background-color": "var(--color-neutral)" } }, ".divider-primary": { "&:before, &:after": { "background-color": "var(--color-primary)" } }, ".divider-secondary": { "&:before, &:after": { "background-color": "var(--color-secondary)" } }, ".divider-accent": { "&:before, &:after": { "background-color": "var(--color-accent)" } }, ".divider-success": { "&:before, &:after": { "background-color": "var(--color-success)" } }, ".divider-warning": { "&:before, &:after": { "background-color": "var(--color-warning)" } }, ".divider-info": { "&:before, &:after": { "background-color": "var(--color-info)" } }, ".divider-error": { "&:before, &:after": { "background-color": "var(--color-error)" } }, ".divider-start:before": { display: "none" }, ".divider-end:after": { display: "none" } }; + +// packages/daisyui/components/divider/index.js +var divider_default = ({ addComponents, prefix = "" }) => { + const prefixeddivider = 
addPrefix(object_default12, prefix); + addComponents({ ...prefixeddivider }); +}; + +// packages/daisyui/components/mask/object.js +var object_default13 = { ".mask": { display: "inline-block", "vertical-align": "middle", "mask-size": "contain", "mask-repeat": "no-repeat", "mask-position": "center" }, ".mask-half-1": { "mask-size": "200%", "mask-position": ["left", "left"], '&:where(:dir(rtl), [dir="rtl"], [dir="rtl"] *)': { "mask-position": "right" } }, ".mask-half-2": { "mask-size": "200%", "mask-position": ["right", "right"], '&:where(:dir(rtl), [dir="rtl"], [dir="rtl"] *)': { "mask-position": "left" } }, ".mask-squircle": { "mask-image": `url("data:image/svg+xml,%3csvg width='200' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M100 0C20 0 0 20 0 100s20 100 100 100 100-20 100-100S180 0 100 0Z'/%3e%3c/svg%3e")` }, ".mask-decagon": { "mask-image": `url("data:image/svg+xml,%3csvg width='192' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 0 58.779 19.098 36.327 50v61.804l-36.327 50L96 200l-58.779-19.098-36.327-50V69.098l36.327-50z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-diamond": { "mask-image": `url("data:image/svg+xml,%3csvg width='200' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m100 0 100 100-100 100L0 100z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-heart": { "mask-image": `url("data:image/svg+xml,%3csvg width='200' height='185' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M100 184.606a15.384 15.384 0 0 1-8.653-2.678C53.565 156.28 37.205 138.695 28.182 127.7 8.952 104.264-.254 80.202.005 54.146.308 24.287 24.264 0 53.406 0c21.192 0 35.869 11.937 44.416 21.879a2.884 2.884 0 0 0 4.356 0C110.725 11.927 125.402 0 146.594 0c29.142 0 53.098 24.287 53.4 54.151.26 26.061-8.956 50.122-28.176 73.554-9.023 10.994-25.383 28.58-63.165 54.228a15.384 15.384 0 0 1-8.653 2.673Z' fill='black' fill-rule='nonzero'/%3e%3c/svg%3e")` }, ".mask-hexagon": { "mask-image": `url("data:image/svg+xml,%3csvg width='182' height='201' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M.3 65.486c0-9.196 6.687-20.063 14.211-25.078l61.86-35.946c8.36-5.016 20.899-5.016 29.258 0l61.86 35.946c8.36 5.015 14.211 15.882 14.211 25.078v71.055c0 9.196-6.687 20.063-14.211 25.079l-61.86 35.945c-8.36 4.18-20.899 4.18-29.258 0L14.51 161.62C6.151 157.44.3 145.737.3 136.54V65.486Z' fill='black' fill-rule='nonzero'/%3e%3c/svg%3e")` }, ".mask-hexagon-2": { "mask-image": `url("data:image/svg+xml,%3csvg width='200' height='182' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M64.786 181.4c-9.196 0-20.063-6.687-25.079-14.21L3.762 105.33c-5.016-8.36-5.016-20.9 0-29.259l35.945-61.86C44.723 5.851 55.59 0 64.786 0h71.055c9.196 0 20.063 6.688 25.079 14.211l35.945 61.86c4.18 8.36 4.18 20.899 0 29.258l-35.945 61.86c-4.18 8.36-15.883 14.211-25.079 14.211H64.786Z' fill='black' fill-rule='nonzero'/%3e%3c/svg%3e")` }, ".mask-circle": { "mask-image": `url("data:image/svg+xml,%3csvg width='200' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle fill='black' cx='100' cy='100' r='100' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-pentagon": { "mask-image": `url("data:image/svg+xml,%3csvg width='192' height='181' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 0 95.106 69.098-36.327 111.804H37.22L.894 69.098z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-star": { "mask-image": `url("data:image/svg+xml,%3csvg width='192' height='180' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 137.263-58.779 42.024 22.163-68.389L.894 
68.481l72.476-.243L96 0l22.63 68.238 72.476.243-58.49 42.417 22.163 68.389z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-star-2": { "mask-image": `url("data:image/svg+xml,%3csvg width='192' height='180' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 153.044-58.779 26.243 7.02-63.513L.894 68.481l63.117-13.01L96 0l31.989 55.472 63.117 13.01-43.347 47.292 7.02 63.513z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-triangle": { "mask-image": `url("data:image/svg+xml,%3csvg width='174' height='149' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m87 148.476-86.603.185L43.86 74.423 87 0l43.14 74.423 43.463 74.238z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-triangle-2": { "mask-image": `url("data:image/svg+xml,%3csvg width='174' height='150' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m87 .738 86.603-.184-43.463 74.238L87 149.214 43.86 74.792.397.554z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-triangle-3": { "mask-image": `url("data:image/svg+xml,%3csvg width='150' height='174' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m149.369 87.107.185 86.603-74.239-43.463L.893 87.107l74.422-43.14L149.554.505z' fill-rule='evenodd'/%3e%3c/svg%3e")` }, ".mask-triangle-4": { "mask-image": `url("data:image/svg+xml,%3csvg width='150' height='174' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M.631 87.107.446.505l74.239 43.462 74.422 43.14-74.422 43.14L.446 173.71z' fill-rule='evenodd'/%3e%3c/svg%3e")` } }; + +// packages/daisyui/components/mask/index.js +var mask_default = ({ addComponents, prefix = "" }) => { + const prefixedmask = addPrefix(object_default13, prefix); + addComponents({ ...prefixedmask }); +}; + +// packages/daisyui/components/fieldset/object.js +var object_default14 = { ".fieldset": { display: "grid", gap: "calc(0.25rem * 1.5)", "padding-block": "calc(0.25rem * 1)", "font-size": "0.75rem", "grid-template-columns": "1fr", "grid-auto-rows": "max-content" }, ".fieldset-legend": { "margin-bottom": "calc(0.25rem * -1)", display: "flex", "align-items": "center", "justify-content": "space-between", gap: "calc(0.25rem * 2)", "padding-block": "calc(0.25rem * 2)", color: "var(--color-base-content)", "font-weight": 600 }, ".fieldset-label": { display: "flex", "align-items": "center", gap: "calc(0.25rem * 1.5)", color: "color-mix(in oklab, var(--color-base-content) 60%, transparent)", "&:has(input)": { cursor: "pointer" } } }; + +// packages/daisyui/components/fieldset/index.js +var fieldset_default = ({ addComponents, prefix = "" }) => { + const prefixedfieldset = addPrefix(object_default14, prefix); + addComponents({ ...prefixedfieldset }); +}; + +// packages/daisyui/components/dropdown/object.js +var object_default15 = { ".dropdown": { position: "relative", display: "inline-block", "position-area": "var(--anchor-v, bottom) var(--anchor-h, span-right)", "& > *:not(summary):focus": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } }, ".dropdown-content": { position: "absolute" }, "&:not(details, .dropdown-open, .dropdown-hover:hover, :focus-within)": { ".dropdown-content": { display: "none", "transform-origin": "top", opacity: "0%", scale: "95%" } }, "&[popover], .dropdown-content": { "z-index": 999, animation: "dropdown 0.2s", "transition-property": "opacity, scale, display", "transition-behavior": "allow-discrete", "transition-duration": "0.2s", "transition-timing-function": "cubic-bezier(0.4, 0, 0.2, 1)" }, 
"@starting-style": { "&[popover], .dropdown-content": { scale: "95%", opacity: 0 } }, "&.dropdown-open, &:not(.dropdown-hover):focus, &:focus-within": { "> [tabindex]:first-child": { "pointer-events": "none" }, ".dropdown-content": { opacity: "100%" } }, "&.dropdown-hover:hover": { ".dropdown-content": { opacity: "100%", scale: "100%" } }, "&:is(details)": { summary: { "&::-webkit-details-marker": { display: "none" } } }, "&.dropdown-open, &:focus, &:focus-within": { ".dropdown-content": { scale: "100%" } }, "&:where([popover])": { background: "#0000" }, "&[popover]": { position: "fixed", color: "inherit", "@supports not (position-area: bottom)": { margin: "auto", "&.dropdown-open:not(:popover-open)": { display: "none", "transform-origin": "top", opacity: "0%", scale: "95%" }, "&::backdrop": { "background-color": "color-mix(in oklab, #000 30%, #0000)" } }, "&:not(.dropdown-open, :popover-open)": { display: "none", "transform-origin": "top", opacity: "0%", scale: "95%" } } }, ".dropdown-start": { "--anchor-h": "span-right", ":where(.dropdown-content)": { "inset-inline-end": "auto" }, "&.dropdown-left": { "--anchor-h": "left", "--anchor-v": "span-bottom", ".dropdown-content": { top: "calc(0.25rem * 0)", bottom: "auto" } }, "&.dropdown-right": { "--anchor-h": "right", "--anchor-v": "span-bottom", ".dropdown-content": { top: "calc(0.25rem * 0)", bottom: "auto" } } }, ".dropdown-center": { "--anchor-h": "center", ":where(.dropdown-content)": { "inset-inline-end": "calc(1/2 * 100%)", translate: "50% 0", '[dir="rtl"] &': { translate: "-50% 0" } }, "&.dropdown-left": { "--anchor-h": "left", "--anchor-v": "center", ".dropdown-content": { top: "auto", bottom: "calc(1/2 * 100%)", translate: "0 50%" } }, "&.dropdown-right": { "--anchor-h": "right", "--anchor-v": "center", ".dropdown-content": { top: "auto", bottom: "calc(1/2 * 100%)", translate: "0 50%" } } }, ".dropdown-end": { "--anchor-h": "span-left", ":where(.dropdown-content)": { "inset-inline-end": "calc(0.25rem * 0)", translate: "0 0" }, "&.dropdown-left": { "--anchor-h": "left", "--anchor-v": "span-top", ".dropdown-content": { top: "auto", bottom: "calc(0.25rem * 0)" } }, "&.dropdown-right": { "--anchor-h": "right", "--anchor-v": "span-top", ".dropdown-content": { top: "auto", bottom: "calc(0.25rem * 0)" } } }, ".dropdown-left": { "--anchor-h": "left", "--anchor-v": "span-bottom", ".dropdown-content": { "inset-inline-end": "100%", top: "calc(0.25rem * 0)", bottom: "auto", "transform-origin": "right" } }, ".dropdown-right": { "--anchor-h": "right", "--anchor-v": "span-bottom", ".dropdown-content": { "inset-inline-start": "100%", top: "calc(0.25rem * 0)", bottom: "auto", "transform-origin": "left" } }, ".dropdown-bottom": { "--anchor-v": "bottom", ".dropdown-content": { top: "100%", bottom: "auto", "transform-origin": "top" } }, ".dropdown-top": { "--anchor-v": "top", ".dropdown-content": { top: "auto", bottom: "100%", "transform-origin": "bottom" } }, "@keyframes dropdown": { "0%": { opacity: 0 } } }; + +// packages/daisyui/components/dropdown/index.js +var dropdown_default = ({ addComponents, prefix = "" }) => { + const prefixeddropdown = addPrefix(object_default15, prefix); + addComponents({ ...prefixeddropdown }); +}; + +// packages/daisyui/components/card/object.js +var object_default16 = { ".card": { position: "relative", display: "flex", "flex-direction": "column", "border-radius": "var(--radius-box)", "outline-width": "2px", transition: "outline 0.2s ease-in-out", outline: "0 solid #0000", "outline-offset": "2px", "&:focus": { 
"--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } }, "&:focus-visible": { "outline-color": "currentColor" }, ":where(figure:first-child)": { overflow: "hidden", "border-start-start-radius": "inherit", "border-start-end-radius": "inherit", "border-end-start-radius": "unset", "border-end-end-radius": "unset" }, ":where(figure:last-child)": { overflow: "hidden", "border-start-start-radius": "unset", "border-start-end-radius": "unset", "border-end-start-radius": "inherit", "border-end-end-radius": "inherit" }, "&:where(.card-border)": { border: "var(--border) solid var(--color-base-200)" }, "&:where(.card-dash)": { border: "var(--border) dashed var(--color-base-200)" }, "&.image-full": { display: "grid", "> *": { "grid-column-start": "1", "grid-row-start": "1" }, "> .card-body": { position: "relative", color: "var(--color-neutral-content)" }, ":where(figure)": { overflow: "hidden", "border-radius": "inherit" }, "> figure img": { height: "100%", "object-fit": "cover", filter: "brightness(28%)" } }, figure: { display: "flex", "align-items": "center", "justify-content": "center" }, '&:has(> input:is(input[type="checkbox"], input[type="radio"]))': { cursor: "pointer", "user-select": "none" }, "&:has(> :checked)": { outline: "2px solid currentColor" } }, ".card-title": { display: "flex", "align-items": "center", gap: "calc(0.25rem * 2)", "font-size": "var(--cardtitle-fs, 1.125rem)", "font-weight": 600 }, ".card-body": { display: "flex", flex: "auto", "flex-direction": "column", gap: "calc(0.25rem * 2)", padding: "var(--card-p, 1.5rem)", "font-size": "var(--card-fs, 0.875rem)", ":where(p)": { "flex-grow": 1 } }, ".card-actions": { display: "flex", "flex-wrap": "wrap", "align-items": "flex-start", gap: "calc(0.25rem * 2)" }, ".card-xs": { ".card-body": { "--card-p": "0.5rem", "--card-fs": "0.6875rem" }, ".card-title": { "--cardtitle-fs": "0.875rem" } }, ".card-sm": { ".card-body": { "--card-p": "1rem", "--card-fs": "0.75rem" }, ".card-title": { "--cardtitle-fs": "1rem" } }, ".card-md": { ".card-body": { "--card-p": "1.5rem", "--card-fs": "0.875rem" }, ".card-title": { "--cardtitle-fs": "1.125rem" } }, ".card-lg": { ".card-body": { "--card-p": "2rem", "--card-fs": "1rem" }, ".card-title": { "--cardtitle-fs": "1.25rem" } }, ".card-xl": { ".card-body": { "--card-p": "2.5rem", "--card-fs": "1.125rem" }, ".card-title": { "--cardtitle-fs": "1.375rem" } }, ".card-side": { "align-items": "stretch", "flex-direction": "row", ":where(figure:first-child)": { overflow: "hidden", "border-start-start-radius": "inherit", "border-start-end-radius": "unset", "border-end-start-radius": "inherit", "border-end-end-radius": "unset" }, ":where(figure:last-child)": { overflow: "hidden", "border-start-start-radius": "unset", "border-start-end-radius": "inherit", "border-end-start-radius": "unset", "border-end-end-radius": "inherit" }, "figure > *": { "max-width": "unset" }, ":where(figure > *)": { width: "100%", height: "100%", "object-fit": "cover" } } }; + +// packages/daisyui/components/card/index.js +var card_default = ({ addComponents, prefix = "" }) => { + const prefixedcard = addPrefix(object_default16, prefix); + addComponents({ ...prefixedcard }); +}; + +// packages/daisyui/components/steps/object.js +var object_default17 = { ".steps": { display: "inline-grid", "grid-auto-flow": "column", overflow: "hidden", "overflow-x": "auto", "counter-reset": "step", "grid-auto-columns": "1fr", ".step": { display: "grid", 
"grid-template-columns": ["repeat(1, minmax(0, 1fr))", "auto"], "grid-template-rows": ["repeat(2, minmax(0, 1fr))", "40px 1fr"], "place-items": "center", "text-align": "center", "min-width": "4rem", "--step-bg": "var(--color-base-300)", "--step-fg": "var(--color-base-content)", "&:before": { top: "calc(0.25rem * 0)", "grid-column-start": "1", "grid-row-start": "1", height: "calc(0.25rem * 2)", width: "100%", border: "1px solid", color: "var(--step-bg)", "background-color": "var(--step-bg)", "--tw-content": '""', content: "var(--tw-content)", "margin-inline-start": "-100%" }, "> .step-icon, &:not(:has(.step-icon)):after": { content: "counter(step)", "counter-increment": "step", "z-index": 1, color: "var(--step-fg)", "background-color": "var(--step-bg)", border: "1px solid var(--step-bg)", position: "relative", "grid-column-start": "1", "grid-row-start": "1", display: "grid", height: "calc(0.25rem * 8)", width: "calc(0.25rem * 8)", "place-items": "center", "place-self": "center", "border-radius": "calc(infinity * 1px)" }, "&:first-child:before": { content: "none" }, "&[data-content]:after": { content: "attr(data-content)" } }, ".step-neutral": { "+ .step-neutral:before, &:after, > .step-icon": { "--step-bg": "var(--color-neutral)", "--step-fg": "var(--color-neutral-content)" } }, ".step-primary": { "+ .step-primary:before, &:after, > .step-icon": { "--step-bg": "var(--color-primary)", "--step-fg": "var(--color-primary-content)" } }, ".step-secondary": { "+ .step-secondary:before, &:after, > .step-icon": { "--step-bg": "var(--color-secondary)", "--step-fg": "var(--color-secondary-content)" } }, ".step-accent": { "+ .step-accent:before, &:after, > .step-icon": { "--step-bg": "var(--color-accent)", "--step-fg": "var(--color-accent-content)" } }, ".step-info": { "+ .step-info:before, &:after, > .step-icon": { "--step-bg": "var(--color-info)", "--step-fg": "var(--color-info-content)" } }, ".step-success": { "+ .step-success:before, &:after, > .step-icon": { "--step-bg": "var(--color-success)", "--step-fg": "var(--color-success-content)" } }, ".step-warning": { "+ .step-warning:before, &:after, > .step-icon": { "--step-bg": "var(--color-warning)", "--step-fg": "var(--color-warning-content)" } }, ".step-error": { "+ .step-error:before, &:after, > .step-icon": { "--step-bg": "var(--color-error)", "--step-fg": "var(--color-error-content)" } } }, ".steps-horizontal": { "grid-auto-columns": "1fr", display: "inline-grid", "grid-auto-flow": "column", overflow: "hidden", "overflow-x": "auto", ".step": { display: "grid", "grid-template-columns": ["repeat(1, minmax(0, 1fr))", "auto"], "grid-template-rows": ["repeat(2, minmax(0, 1fr))", "40px 1fr"], "place-items": "center", "text-align": "center", "min-width": "4rem", "&:before": { height: "calc(0.25rem * 2)", width: "100%", translate: "0", content: '""', "margin-inline-start": "-100%" }, '[dir="rtl"] &:before': { translate: "0" } } }, ".steps-vertical": { "grid-auto-rows": "1fr", "grid-auto-flow": "row", ".step": { display: "grid", "grid-template-columns": ["repeat(2, minmax(0, 1fr))", "40px 1fr"], "grid-template-rows": ["repeat(1, minmax(0, 1fr))", "auto"], gap: "0.5rem", "min-height": "4rem", "justify-items": "start", "&:before": { height: "100%", width: "calc(0.25rem * 2)", translate: "-50% -50%", "margin-inline-start": "50%" }, '[dir="rtl"] &:before': { translate: "50% -50%" } } } }; + +// packages/daisyui/components/steps/index.js +var steps_default = ({ addComponents, prefix = "" }) => { + const prefixedsteps = addPrefix(object_default17, prefix); + 
addComponents({ ...prefixedsteps }); +}; + +// packages/daisyui/components/alert/object.js +var object_default18 = { ".alert": { display: "grid", "align-items": "center", gap: "calc(0.25rem * 4)", "border-radius": "var(--radius-box)", "padding-inline": "calc(0.25rem * 4)", "padding-block": "calc(0.25rem * 3)", color: "var(--color-base-content)", "background-color": "var(--alert-color, var(--color-base-200))", "justify-content": "start", "justify-items": "start", "grid-auto-flow": "column", "grid-template-columns": "auto", "text-align": "start", border: "var(--border) solid var(--color-base-200)", "font-size": "0.875rem", "line-height": "1.25rem", "background-size": "auto, calc(var(--noise) * 100%)", "background-image": "none, var(--fx-noise)", "box-shadow": "0 3px 0 -2px oklch(100% 0 0 / calc(var(--depth) * 0.08)) inset, 0 1px color-mix( in oklab, color-mix(in oklab, #000 20%, var(--alert-color, var(--color-base-200))) calc(var(--depth) * 20%), #0000 ), 0 4px 3px -2px oklch(0% 0 0 / calc(var(--depth) * 0.08))", "&:has(:nth-child(2))": { "grid-template-columns": "auto minmax(auto, 1fr)" }, "&.alert-outline": { "background-color": "transparent", color: "var(--alert-color)", "box-shadow": "none", "background-image": "none" }, "&.alert-dash": { "background-color": "transparent", color: "var(--alert-color)", "border-style": "dashed", "box-shadow": "none", "background-image": "none" }, "&.alert-soft": { color: "var(--alert-color, var(--color-base-content))", background: "color-mix( in oklab, var(--alert-color, var(--color-base-content)) 8%, var(--color-base-100) )", "border-color": "color-mix( in oklab, var(--alert-color, var(--color-base-content)) 10%, var(--color-base-100) )", "box-shadow": "none", "background-image": "none" } }, ".alert-info": { "border-color": "var(--color-info)", color: "var(--color-info-content)", "--alert-color": "var(--color-info)" }, ".alert-success": { "border-color": "var(--color-success)", color: "var(--color-success-content)", "--alert-color": "var(--color-success)" }, ".alert-warning": { "border-color": "var(--color-warning)", color: "var(--color-warning-content)", "--alert-color": "var(--color-warning)" }, ".alert-error": { "border-color": "var(--color-error)", color: "var(--color-error-content)", "--alert-color": "var(--color-error)" }, ".alert-vertical": { "justify-content": "center", "justify-items": "center", "grid-auto-flow": "row", "grid-template-columns": "auto", "text-align": "center", "&:has(:nth-child(2))": { "grid-template-columns": "auto" } }, ".alert-horizontal": { "justify-content": "start", "justify-items": "start", "grid-auto-flow": "column", "grid-template-columns": "auto", "text-align": "start", "&:has(:nth-child(2))": { "grid-template-columns": "auto minmax(auto, 1fr)" } } }; + +// packages/daisyui/components/alert/index.js +var alert_default = ({ addComponents, prefix = "" }) => { + const prefixedalert = addPrefix(object_default18, prefix); + addComponents({ ...prefixedalert }); +}; + +// packages/daisyui/components/kbd/object.js +var object_default19 = { ".kbd": { display: "inline-flex", "align-items": "center", "justify-content": "center", "border-radius": "var(--radius-field)", "background-color": "var(--color-base-200)", "vertical-align": "middle", "padding-left": "0.5em", "padding-right": "0.5em", border: "var(--border) solid color-mix(in srgb, var(--color-base-content) 20%, #0000)", "border-bottom": "calc(var(--border) + 1px) solid color-mix(in srgb, var(--color-base-content) 20%, #0000)", "--size": "calc(var(--size-selector, 0.25rem) * 
6)", "font-size": "0.875rem", height: "var(--size)", "min-width": "var(--size)" }, ".kbd-xs": { "--size": "calc(var(--size-selector, 0.25rem) * 4)", "font-size": "0.625rem" }, ".kbd-sm": { "--size": "calc(var(--size-selector, 0.25rem) * 5)", "font-size": "0.75rem" }, ".kbd-md": { "--size": "calc(var(--size-selector, 0.25rem) * 6)", "font-size": "0.875rem" }, ".kbd-lg": { "--size": "calc(var(--size-selector, 0.25rem) * 7)", "font-size": "1rem" }, ".kbd-xl": { "--size": "calc(var(--size-selector, 0.25rem) * 8)", "font-size": "1.125rem" } }; + +// packages/daisyui/components/kbd/index.js +var kbd_default = ({ addComponents, prefix = "" }) => { + const prefixedkbd = addPrefix(object_default19, prefix); + addComponents({ ...prefixedkbd }); +}; + +// packages/daisyui/components/select/object.js +var object_default20 = { ".select": { border: "var(--border) solid #0000", position: "relative", display: "inline-flex", "flex-shrink": 1, appearance: "none", "align-items": "center", gap: "calc(0.25rem * 1.5)", "background-color": "var(--color-base-100)", "padding-inline-start": "calc(0.25rem * 4)", "padding-inline-end": "calc(0.25rem * 7)", "vertical-align": "middle", width: "clamp(3rem, 20rem, 100%)", height: "var(--size)", "font-size": "0.875rem", "border-start-start-radius": "var(--join-ss, var(--radius-field))", "border-start-end-radius": "var(--join-se, var(--radius-field))", "border-end-start-radius": "var(--join-es, var(--radius-field))", "border-end-end-radius": "var(--join-ee, var(--radius-field))", "background-image": "linear-gradient(45deg, #0000 50%, currentColor 50%), linear-gradient(135deg, currentColor 50%, #0000 50%)", "background-position": "calc(100% - 20px) calc(1px + 50%), calc(100% - 16.1px) calc(1px + 50%)", "background-size": "4px 4px, 4px 4px", "background-repeat": "no-repeat", "text-overflow": "ellipsis", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset", "border-color": "var(--input-color)", "--input-color": "color-mix(in oklab, var(--color-base-content) 20%, #0000)", "--size": "calc(var(--size-field, 0.25rem) * 10)", '[dir="rtl"] &': { "background-position": "calc(0% + 12px) calc(1px + 50%), calc(0% + 16px) calc(1px + 50%)" }, select: { "margin-inline-start": "calc(0.25rem * -4)", "margin-inline-end": "calc(0.25rem * -7)", width: "calc(100% + 2.75rem)", appearance: "none", "padding-inline-start": "calc(0.25rem * 4)", "padding-inline-end": "calc(0.25rem * 7)", height: "calc(100% - 2px)", background: "inherit", "border-radius": "inherit", "border-style": "none", "&:focus, &:focus-within": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } }, "&:not(:last-child)": { "margin-inline-end": "calc(0.25rem * -5.5)", "background-image": "none" } }, "&:focus, &:focus-within": { "--input-color": "var(--color-base-content)", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000)", outline: "2px solid var(--input-color)", "outline-offset": "2px", isolation: "isolate", "z-index": 1 }, "&:has(> select[disabled]), &:is(:disabled, [disabled])": { cursor: "not-allowed", "border-color": "var(--color-base-200)", "background-color": "var(--color-base-200)", color: "color-mix(in oklab, var(--color-base-content) 40%, transparent)", "&::placeholder": { color: "color-mix(in oklab, var(--color-base-content) 20%, transparent)" } }, "&:has(> select[disabled]) > 
select[disabled]": { cursor: "not-allowed" } }, ".select-ghost": { "background-color": "transparent", transition: "background-color 0.2s", "box-shadow": "none", "border-color": "#0000", "&:focus, &:focus-within": { "background-color": "var(--color-base-100)", color: "var(--color-base-content)", "border-color": "#0000", "box-shadow": "none" } }, ".select-neutral": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-neutral)" } }, ".select-primary": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-primary)" } }, ".select-secondary": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-secondary)" } }, ".select-accent": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-accent)" } }, ".select-info": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-info)" } }, ".select-success": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-success)" } }, ".select-warning": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-warning)" } }, ".select-error": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-error)" } }, ".select-xs": { "--size": "calc(var(--size-field, 0.25rem) * 6)", "font-size": "0.6875rem" }, ".select-sm": { "--size": "calc(var(--size-field, 0.25rem) * 8)", "font-size": "0.75rem" }, ".select-md": { "--size": "calc(var(--size-field, 0.25rem) * 10)", "font-size": "0.875rem" }, ".select-lg": { "--size": "calc(var(--size-field, 0.25rem) * 12)", "font-size": "1.125rem" }, ".select-xl": { "--size": "calc(var(--size-field, 0.25rem) * 14)", "font-size": "1.375rem" } }; + +// packages/daisyui/components/select/index.js +var select_default = ({ addComponents, prefix = "" }) => { + const prefixedselect = addPrefix(object_default20, prefix); + addComponents({ ...prefixedselect }); +}; + +// packages/daisyui/components/progress/object.js +var object_default21 = { ".progress": { position: "relative", height: "calc(0.25rem * 2)", width: "100%", appearance: "none", overflow: "hidden", "border-radius": "var(--radius-box)", "background-color": "color-mix(in oklab, currentColor 20%, transparent)", color: "var(--color-base-content)", "&:indeterminate": { "background-image": "repeating-linear-gradient( 90deg, currentColor -1%, currentColor 10%, #0000 10%, #0000 90% )", "background-size": "200%", "background-position-x": "15%", animation: "progress 5s ease-in-out infinite", "@supports (-moz-appearance: none)": { "&::-moz-progress-bar": { "background-color": "transparent", "background-image": "repeating-linear-gradient( 90deg, currentColor -1%, currentColor 10%, #0000 10%, #0000 90% )", "background-size": "200%", "background-position-x": "15%", animation: "progress 5s ease-in-out infinite" } } }, "@supports (-moz-appearance: none)": { "&::-moz-progress-bar": { "border-radius": "var(--radius-box)", "background-color": "currentColor" } }, "@supports (-webkit-appearance: none)": { "&::-webkit-progress-bar": { "border-radius": "var(--radius-box)", "background-color": "transparent" }, "&::-webkit-progress-value": { "border-radius": "var(--radius-box)", "background-color": "currentColor" } } }, ".progress-primary": { color: "var(--color-primary)" }, ".progress-secondary": { color: "var(--color-secondary)" }, ".progress-accent": { color: "var(--color-accent)" }, ".progress-neutral": { color: "var(--color-neutral)" }, ".progress-info": { color: "var(--color-info)" }, ".progress-success": { color: "var(--color-success)" }, ".progress-warning": { color: "var(--color-warning)" }, ".progress-error": { color: 
"var(--color-error)" }, "@keyframes progress": { "50%": { "background-position-x": "-115%" } } }; + +// packages/daisyui/components/progress/index.js +var progress_default = ({ addComponents, prefix = "" }) => { + const prefixedprogress = addPrefix(object_default21, prefix); + addComponents({ ...prefixedprogress }); +}; + +// packages/daisyui/components/fileinput/object.js +var object_default22 = { ".file-input": { cursor: ["pointer", "pointer"], border: "var(--border) solid #0000", display: "inline-flex", appearance: "none", "align-items": "center", "background-color": "var(--color-base-100)", "vertical-align": "middle", "webkit-user-select": "none", "user-select": "none", width: "clamp(3rem, 20rem, 100%)", height: "var(--size)", "padding-inline-end": "0.75rem", "font-size": "0.875rem", "line-height": 2, "border-start-start-radius": "var(--join-ss, var(--radius-field))", "border-start-end-radius": "var(--join-se, var(--radius-field))", "border-end-start-radius": "var(--join-es, var(--radius-field))", "border-end-end-radius": "var(--join-ee, var(--radius-field))", "border-color": "var(--input-color)", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset", "--size": "calc(var(--size-field, 0.25rem) * 10)", "--input-color": "color-mix(in oklab, var(--color-base-content) 20%, #0000)", "&::file-selector-button": { "margin-inline-end": "calc(0.25rem * 4)", cursor: "pointer", "padding-inline": "calc(0.25rem * 4)", "webkit-user-select": "none", "user-select": "none", height: "calc(100% + var(--border) * 2)", "margin-block": "calc(var(--border) * -1)", "margin-inline-start": "calc(var(--border) * -1)", "font-size": "0.875rem", color: "var(--btn-fg)", "border-width": "var(--border)", "border-style": "solid", "border-color": "var(--btn-border)", "border-start-start-radius": "calc(var(--join-ss, var(--radius-field) - var(--border)))", "border-end-start-radius": "calc(var(--join-es, var(--radius-field) - var(--border)))", "font-weight": 600, "background-color": "var(--btn-bg)", "background-size": "calc(var(--noise) * 100%)", "background-image": "var(--btn-noise)", "text-shadow": "0 0.5px oklch(1 0 0 / calc(var(--depth) * 0.15))", "box-shadow": "0 0.5px 0 0.5px color-mix( in oklab, color-mix(in oklab, white 30%, var(--btn-bg)) calc(var(--depth) * 20%), #0000 ) inset, var(--btn-shadow)", "--size": "calc(var(--size-field, 0.25rem) * 10)", "--btn-bg": "var(--btn-color, var(--color-base-200))", "--btn-fg": "var(--color-base-content)", "--btn-border": "color-mix(in oklab, var(--btn-bg), #000 5%)", "--btn-shadow": `0 3px 2px -2px color-mix(in oklab, var(--btn-bg) 30%, #0000), + 0 4px 3px -2px color-mix(in oklab, var(--btn-bg) 30%, #0000)`, "--btn-noise": "var(--fx-noise)" }, "&:focus": { "--input-color": "var(--color-base-content)", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) 10%, #0000)", outline: "2px solid var(--input-color)", "outline-offset": "2px", isolation: "isolate" }, "&:has(> input[disabled]), &:is(:disabled, [disabled])": { cursor: "not-allowed", "border-color": "var(--color-base-200)", "background-color": "var(--color-base-200)", "&::placeholder": { color: "color-mix(in oklab, var(--color-base-content) 20%, transparent)" }, "box-shadow": "none", color: "color-mix(in oklch, var(--color-base-content) 20%, #0000)", "&::file-selector-button": { cursor: "not-allowed", "border-color": "var(--color-base-200)", "background-color": "var(--color-base-200)", "--btn-border": "#0000", 
"--btn-noise": "none", "--btn-fg": "color-mix(in oklch, var(--color-base-content) 20%, #0000)" } } }, ".file-input-ghost": { "background-color": "transparent", transition: "background-color 0.2s", "box-shadow": "none", "border-color": "#0000", "&::file-selector-button": { "margin-inline-start": "calc(0.25rem * 0)", "margin-inline-end": "calc(0.25rem * 4)", height: "100%", cursor: "pointer", "padding-inline": "calc(0.25rem * 4)", "webkit-user-select": "none", "user-select": "none", "margin-block": "0", "border-start-end-radius": "calc(var(--join-ss, var(--radius-field) - var(--border)))", "border-end-end-radius": "calc(var(--join-es, var(--radius-field) - var(--border)))" }, "&:focus, &:focus-within": { "background-color": "var(--color-base-100)", color: "var(--color-base-content)", "border-color": "#0000", "box-shadow": "none" } }, ".file-input-neutral": { "--btn-color": "var(--color-neutral)", "&::file-selector-button": { color: "var(--color-neutral-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-neutral)" } }, ".file-input-primary": { "--btn-color": "var(--color-primary)", "&::file-selector-button": { color: "var(--color-primary-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-primary)" } }, ".file-input-secondary": { "--btn-color": "var(--color-secondary)", "&::file-selector-button": { color: "var(--color-secondary-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-secondary)" } }, ".file-input-accent": { "--btn-color": "var(--color-accent)", "&::file-selector-button": { color: "var(--color-accent-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-accent)" } }, ".file-input-info": { "--btn-color": "var(--color-info)", "&::file-selector-button": { color: "var(--color-info-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-info)" } }, ".file-input-success": { "--btn-color": "var(--color-success)", "&::file-selector-button": { color: "var(--color-success-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-success)" } }, ".file-input-warning": { "--btn-color": "var(--color-warning)", "&::file-selector-button": { color: "var(--color-warning-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-warning)" } }, ".file-input-error": { "--btn-color": "var(--color-error)", "&::file-selector-button": { color: "var(--color-error-content)" }, "&, &:focus, &:focus-within": { "--input-color": "var(--color-error)" } }, ".file-input-xs": { "--size": "calc(var(--size-field, 0.25rem) * 6)", "font-size": "0.6875rem", "line-height": "1rem", "&::file-selector-button": { "font-size": "0.6875rem" } }, ".file-input-sm": { "--size": "calc(var(--size-field, 0.25rem) * 8)", "font-size": "0.75rem", "line-height": "1.5rem", "&::file-selector-button": { "font-size": "0.75rem" } }, ".file-input-md": { "--size": "calc(var(--size-field, 0.25rem) * 10)", "font-size": "0.875rem", "line-height": 2, "&::file-selector-button": { "font-size": "0.875rem" } }, ".file-input-lg": { "--size": "calc(var(--size-field, 0.25rem) * 12)", "font-size": "1.125rem", "line-height": "2.5rem", "&::file-selector-button": { "font-size": "1.125rem" } }, ".file-input-xl": { "padding-inline-end": "calc(0.25rem * 6)", "--size": "calc(var(--size-field, 0.25rem) * 14)", "font-size": "1.125rem", "line-height": "3rem", "&::file-selector-button": { "font-size": "1.375rem" } } }; + +// packages/daisyui/components/fileinput/index.js +var fileinput_default = ({ addComponents, prefix = "" }) 
=> { + const prefixedfileinput = addPrefix(object_default22, prefix); + addComponents({ ...prefixedfileinput }); +}; + +// packages/daisyui/components/modal/object.js +var object_default23 = { ".modal": { "pointer-events": "none", visibility: "hidden", position: "fixed", inset: "calc(0.25rem * 0)", margin: "calc(0.25rem * 0)", display: "grid", height: "100%", "max-height": "none", width: "100%", "max-width": "none", "align-items": "center", "justify-items": "center", "background-color": "transparent", padding: "calc(0.25rem * 0)", color: "inherit", "overflow-x": "hidden", transition: "translate 0.3s ease-out, visibility 0.3s allow-discrete, background-color 0.3s ease-out, opacity 0.1s ease-out", "overflow-y": "hidden", "overscroll-behavior": "contain", "z-index": 999, "&::backdrop": { display: "none" }, "&.modal-open, &[open], &:target": { "pointer-events": "auto", visibility: "visible", opacity: "100%", "background-color": "oklch(0% 0 0/ 0.4)", ".modal-box": { translate: "0 0", scale: "1", opacity: 1 } }, "@starting-style": { "&.modal-open, &[open], &:target": { visibility: "hidden", opacity: "0%" } } }, ".modal-action": { "margin-top": "calc(0.25rem * 6)", display: "flex", "justify-content": "flex-end", gap: "calc(0.25rem * 2)" }, ".modal-toggle": { position: "fixed", height: "calc(0.25rem * 0)", width: "calc(0.25rem * 0)", appearance: "none", opacity: "0%", "&:checked + .modal": { "pointer-events": "auto", visibility: "visible", opacity: "100%", "background-color": "oklch(0% 0 0/ 0.4)", ".modal-box": { translate: "0 0", scale: "1", opacity: 1 } }, "@starting-style": { "&:checked + .modal": { visibility: "hidden", opacity: "0%" } } }, ".modal-backdrop": { "grid-column-start": "1", "grid-row-start": "1", display: "grid", "align-self": "stretch", "justify-self": "stretch", color: "transparent", "z-index": -1, button: { cursor: "pointer" } }, ".modal-box": { "grid-column-start": "1", "grid-row-start": "1", "max-height": "100vh", width: "calc(11/12 * 100%)", "max-width": "32rem", "background-color": "var(--color-base-100)", padding: "calc(0.25rem * 6)", transition: "translate 0.3s ease-out, scale 0.3s ease-out, opacity 0.2s ease-out 0.05s, box-shadow 0.3s ease-out", "border-top-left-radius": "var(--modal-tl, var(--radius-box))", "border-top-right-radius": "var(--modal-tr, var(--radius-box))", "border-bottom-left-radius": "var(--modal-bl, var(--radius-box))", "border-bottom-right-radius": "var(--modal-br, var(--radius-box))", scale: "95%", opacity: 0, "box-shadow": "oklch(0% 0 0/ 0.25) 0px 25px 50px -12px", "overflow-y": "auto", "overscroll-behavior": "contain" }, ".modal-top": { "place-items": "start", ":where(.modal-box)": { height: "auto", width: "100%", "max-width": "none", "max-height": "calc(100vh - 5em)", translate: "0 -100%", scale: "1", "--modal-tl": "0", "--modal-tr": "0", "--modal-bl": "var(--radius-box)", "--modal-br": "var(--radius-box)" } }, ".modal-middle": { "place-items": "center", ":where(.modal-box)": { height: "auto", width: "calc(11/12 * 100%)", "max-width": "32rem", "max-height": "calc(100vh - 5em)", translate: "0 2%", scale: "98%", "--modal-tl": "var(--radius-box)", "--modal-tr": "var(--radius-box)", "--modal-bl": "var(--radius-box)", "--modal-br": "var(--radius-box)" } }, ".modal-bottom": { "place-items": "end", ":where(.modal-box)": { height: "auto", width: "100%", "max-width": "none", "max-height": "calc(100vh - 5em)", translate: "0 100%", scale: "1", "--modal-tl": "var(--radius-box)", "--modal-tr": "var(--radius-box)", "--modal-bl": "0", "--modal-br": "0" } }, 
".modal-start": { "place-items": "start", ":where(.modal-box)": { height: "100vh", "max-height": "none", width: "auto", "max-width": "none", translate: "-100% 0", scale: "1", "--modal-tl": "0", "--modal-tr": "var(--radius-box)", "--modal-bl": "0", "--modal-br": "var(--radius-box)" } }, ".modal-end": { "place-items": "end", ":where(.modal-box)": { height: "100vh", "max-height": "none", width: "auto", "max-width": "none", translate: "100% 0", scale: "1", "--modal-tl": "var(--radius-box)", "--modal-tr": "0", "--modal-bl": "var(--radius-box)", "--modal-br": "0" } } }; + +// packages/daisyui/components/modal/index.js +var modal_default = ({ addComponents, prefix = "" }) => { + const prefixedmodal = addPrefix(object_default23, prefix); + addComponents({ ...prefixedmodal }); +}; + +// packages/daisyui/components/footer/object.js +var object_default24 = { ".footer": { display: "grid", width: "100%", "grid-auto-flow": "row", "place-items": "start", "column-gap": "calc(0.25rem * 4)", "row-gap": "calc(0.25rem * 10)", "font-size": "0.875rem", "line-height": "1.25rem", "& > *": { display: "grid", "place-items": "start", gap: "calc(0.25rem * 2)" }, "&.footer-center": { "grid-auto-flow": "column dense", "place-items": "center", "text-align": "center", "& > *": { "place-items": "center" } } }, ".footer-title": { "margin-bottom": "calc(0.25rem * 2)", "text-transform": "uppercase", opacity: "60%", "font-weight": 600 }, ".footer-horizontal": { "grid-auto-flow": "column", "&.footer-center": { "grid-auto-flow": "row dense" } }, ".footer-vertical": { "grid-auto-flow": "row", "&.footer-center": { "grid-auto-flow": "column dense" } } }; + +// packages/daisyui/components/footer/index.js +var footer_default = ({ addComponents, prefix = "" }) => { + const prefixedfooter = addPrefix(object_default24, prefix); + addComponents({ ...prefixedfooter }); +}; + +// packages/daisyui/components/table/object.js +var object_default25 = { ".table": { "font-size": "0.875rem", position: "relative", width: "100%", "border-radius": "var(--radius-box)", "text-align": "left", '&:where(:dir(rtl), [dir="rtl"], [dir="rtl"] *)': { "text-align": "right" }, "tr.row-hover": { "&, &:nth-child(even)": { "&:hover": { "@media (hover: hover)": { "background-color": "var(--color-base-200)" } } } }, ":where(th, td)": { "padding-inline": "calc(0.25rem * 4)", "padding-block": "calc(0.25rem * 3)", "vertical-align": "middle" }, ":where(thead, tfoot)": { "white-space": "nowrap", color: "color-mix(in oklab, var(--color-base-content) 60%, transparent)", "font-size": "0.875rem", "font-weight": 600 }, ":where(tfoot)": { "border-top": "var(--border) solid color-mix(in oklch, var(--color-base-content) 5%, #0000)" }, ":where(.table-pin-rows thead tr)": { position: "sticky", top: "calc(0.25rem * 0)", "z-index": 1, "background-color": "var(--color-base-100)" }, ":where(.table-pin-rows tfoot tr)": { position: "sticky", bottom: "calc(0.25rem * 0)", "z-index": 1, "background-color": "var(--color-base-100)" }, ":where(.table-pin-cols tr th)": { position: "sticky", right: "calc(0.25rem * 0)", left: "calc(0.25rem * 0)", "background-color": "var(--color-base-100)" }, ":where(thead tr, tbody tr:not(:last-child))": { "border-bottom": "var(--border) solid color-mix(in oklch, var(--color-base-content) 5%, #0000)" } }, ".table-zebra": { tbody: { tr: { "&:where(:nth-child(even))": { "background-color": "var(--color-base-200)", ":where(.table-pin-cols tr th)": { "background-color": "var(--color-base-200)" } }, "&.row-hover": { "&, &:where(:nth-child(even))": { "&:hover": { 
"@media (hover: hover)": { "background-color": "var(--color-base-300)" } } } } } } }, ".table-xs": { ":not(thead, tfoot) tr": { "font-size": "0.6875rem" }, ":where(th, td)": { "padding-inline": "calc(0.25rem * 2)", "padding-block": "calc(0.25rem * 1)" } }, ".table-sm": { ":not(thead, tfoot) tr": { "font-size": "0.75rem" }, ":where(th, td)": { "padding-inline": "calc(0.25rem * 3)", "padding-block": "calc(0.25rem * 2)" } }, ".table-md": { ":not(thead, tfoot) tr": { "font-size": "0.875rem" }, ":where(th, td)": { "padding-inline": "calc(0.25rem * 4)", "padding-block": "calc(0.25rem * 3)" } }, ".table-lg": { ":not(thead, tfoot) tr": { "font-size": "1.125rem" }, ":where(th, td)": { "padding-inline": "calc(0.25rem * 5)", "padding-block": "calc(0.25rem * 4)" } }, ".table-xl": { ":not(thead, tfoot) tr": { "font-size": "1.375rem" }, ":where(th, td)": { "padding-inline": "calc(0.25rem * 6)", "padding-block": "calc(0.25rem * 5)" } } }; + +// packages/daisyui/components/table/index.js +var table_default = ({ addComponents, prefix = "" }) => { + const prefixedtable = addPrefix(object_default25, prefix); + addComponents({ ...prefixedtable }); +}; + +// packages/daisyui/components/avatar/object.js +var object_default26 = { ".avatar-group": { display: "flex", overflow: "hidden", ":where(.avatar)": { overflow: "hidden", "border-radius": "calc(infinity * 1px)", border: "4px solid var(--color-base-100)" } }, ".avatar": { position: "relative", display: "inline-flex", "vertical-align": "middle", "& > div": { display: "block", "aspect-ratio": "1 / 1", overflow: "hidden" }, img: { height: "100%", width: "100%", "object-fit": "cover" } }, ".avatar-placeholder": { "& > div": { display: "flex", "align-items": "center", "justify-content": "center" } }, ".avatar-online": { "&:before": { content: '""', position: "absolute", "z-index": 1, display: "block", "border-radius": "calc(infinity * 1px)", "background-color": "var(--color-success)", outline: "2px solid var(--color-base-100)", width: "15%", height: "15%", top: "7%", right: "7%" } }, ".avatar-offline": { "&:before": { content: '""', position: "absolute", "z-index": 1, display: "block", "border-radius": "calc(infinity * 1px)", "background-color": "var(--color-base-300)", outline: "2px solid var(--color-base-100)", width: "15%", height: "15%", top: "7%", right: "7%" } } }; + +// packages/daisyui/components/avatar/index.js +var avatar_default = ({ addComponents, prefix = "" }) => { + const prefixedavatar = addPrefix(object_default26, prefix); + addComponents({ ...prefixedavatar }); +}; + +// packages/daisyui/components/input/object.js +var object_default27 = { ".input": { cursor: "text", border: "var(--border) solid #0000", position: "relative", display: "inline-flex", "flex-shrink": 1, appearance: "none", "align-items": "center", gap: "calc(0.25rem * 2)", "background-color": "var(--color-base-100)", "padding-inline": "calc(0.25rem * 3)", "vertical-align": "middle", "white-space": "nowrap", width: "clamp(3rem, 20rem, 100%)", height: "var(--size)", "font-size": "0.875rem", "border-start-start-radius": "var(--join-ss, var(--radius-field))", "border-start-end-radius": "var(--join-se, var(--radius-field))", "border-end-start-radius": "var(--join-es, var(--radius-field))", "border-end-end-radius": "var(--join-ee, var(--radius-field))", "border-color": "var(--input-color)", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset", "--size": "calc(var(--size-field, 0.25rem) * 
10)", "--input-color": "color-mix(in oklab, var(--color-base-content) 20%, #0000)", "&:where(input)": { display: "inline-flex" }, ":where(input)": { display: "inline-flex", height: "100%", width: "100%", appearance: "none", "background-color": "transparent", border: "none", "&:focus, &:focus-within": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } } }, ':where(input[type="date"])': { display: "inline-block" }, "&:focus, &:focus-within": { "--input-color": "var(--color-base-content)", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000)", outline: "2px solid var(--input-color)", "outline-offset": "2px", isolation: "isolate", "z-index": 1 }, "&:has(> input[disabled]), &:is(:disabled, [disabled])": { cursor: "not-allowed", "border-color": "var(--color-base-200)", "background-color": "var(--color-base-200)", color: "color-mix(in oklab, var(--color-base-content) 40%, transparent)", "&::placeholder": { color: "color-mix(in oklab, var(--color-base-content) 20%, transparent)" }, "box-shadow": "none" }, "&:has(> input[disabled]) > input[disabled]": { cursor: "not-allowed" }, "&::-webkit-date-and-time-value": { "text-align": "inherit" }, '&[type="number"]': { "&::-webkit-inner-spin-button": { "margin-block": "calc(0.25rem * -3)", "margin-inline-end": "calc(0.25rem * -3)" } }, "&::-webkit-calendar-picker-indicator": { position: "absolute", "inset-inline-end": "0.75em" } }, ".input-ghost": { "background-color": "transparent", "box-shadow": "none", "border-color": "#0000", "&:focus, &:focus-within": { "background-color": "var(--color-base-100)", color: "var(--color-base-content)", "border-color": "#0000", "box-shadow": "none" } }, ".input-neutral": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-neutral)" } }, ".input-primary": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-primary)" } }, ".input-secondary": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-secondary)" } }, ".input-accent": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-accent)" } }, ".input-info": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-info)" } }, ".input-success": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-success)" } }, ".input-warning": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-warning)" } }, ".input-error": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-error)" } }, ".input-xs": { "--size": "calc(var(--size-field, 0.25rem) * 6)", "font-size": "0.6875rem", '&[type="number"]': { "&::-webkit-inner-spin-button": { "margin-block": "calc(0.25rem * -1)", "margin-inline-end": "calc(0.25rem * -3)" } } }, ".input-sm": { "--size": "calc(var(--size-field, 0.25rem) * 8)", "font-size": "0.75rem", '&[type="number"]': { "&::-webkit-inner-spin-button": { "margin-block": "calc(0.25rem * -2)", "margin-inline-end": "calc(0.25rem * -3)" } } }, ".input-md": { "--size": "calc(var(--size-field, 0.25rem) * 10)", "font-size": "0.875rem", '&[type="number"]': { "&::-webkit-inner-spin-button": { "margin-block": "calc(0.25rem * -3)", "margin-inline-end": "calc(0.25rem * -3)" } } }, ".input-lg": { "--size": "calc(var(--size-field, 0.25rem) * 12)", "font-size": "1.125rem", '&[type="number"]': { "&::-webkit-inner-spin-button": { "margin-block": "calc(0.25rem * -3)", "margin-inline-end": "calc(0.25rem * -3)" } } }, ".input-xl": { "--size": "calc(var(--size-field, 0.25rem) * 
14)", "font-size": "1.375rem", '&[type="number"]': { "&::-webkit-inner-spin-button": { "margin-block": "calc(0.25rem * -4)", "margin-inline-end": "calc(0.25rem * -3)" } } } }; + +// packages/daisyui/components/input/index.js +var input_default = ({ addComponents, prefix = "" }) => { + const prefixedinput = addPrefix(object_default27, prefix); + addComponents({ ...prefixedinput }); +}; + +// packages/daisyui/components/checkbox/object.js +var object_default28 = { ".checkbox": { border: "var(--border) solid var(--input-color, color-mix(in oklab, var(--color-base-content) 20%, #0000))", position: "relative", "flex-shrink": 0, cursor: "pointer", appearance: "none", "border-radius": "var(--radius-selector)", padding: "calc(0.25rem * 1)", "vertical-align": "middle", color: "var(--color-base-content)", "box-shadow": "0 1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 0 #0000 inset, 0 0 #0000", transition: "background-color 0.2s, box-shadow 0.2s", "--size": "calc(var(--size-selector, 0.25rem) * 6)", width: "var(--size)", height: "var(--size)", "background-size": "auto, calc(var(--noise) * 100%)", "background-image": "none, var(--fx-noise)", "&:before": { "--tw-content": '""', content: "var(--tw-content)", display: "block", width: "100%", height: "100%", rotate: "45deg", "background-color": "currentColor", opacity: "0%", transition: "clip-path 0.3s, opacity 0.1s, rotate 0.3s, translate 0.3s", "transition-delay": "0.1s", "clip-path": "polygon(20% 100%, 20% 80%, 50% 80%, 50% 80%, 70% 80%, 70% 100%)", "box-shadow": "0px 3px 0 0px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset", "font-size": "1rem", "line-height": 0.75 }, "&:focus-visible": { outline: "2px solid var(--input-color, currentColor)", "outline-offset": "2px" }, '&:checked, &[aria-checked="true"]': { "background-color": "var(--input-color, #0000)", "box-shadow": "0 0 #0000 inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px oklch(0% 0 0 / calc(var(--depth) * 0.1))", "&:before": { "clip-path": "polygon(20% 100%, 20% 80%, 50% 80%, 50% 0%, 70% 0%, 70% 100%)", opacity: "100%" }, "@media (forced-colors: active)": { "&:before": { rotate: "0deg", "background-color": "transparent", "--tw-content": '"✔︎"', "clip-path": "none" } }, "@media print": { "&:before": { rotate: "0deg", "background-color": "transparent", "--tw-content": '"✔︎"', "clip-path": "none" } } }, "&:indeterminate": { "&:before": { rotate: "0deg", opacity: "100%", translate: "0 -35%", "clip-path": "polygon(20% 100%, 20% 80%, 50% 80%, 50% 80%, 80% 80%, 80% 100%)" } } }, ".checkbox-primary": { color: "var(--color-primary-content)", "--input-color": "var(--color-primary)" }, ".checkbox-secondary": { color: "var(--color-secondary-content)", "--input-color": "var(--color-secondary)" }, ".checkbox-accent": { color: "var(--color-accent-content)", "--input-color": "var(--color-accent)" }, ".checkbox-neutral": { color: "var(--color-neutral-content)", "--input-color": "var(--color-neutral)" }, ".checkbox-info": { color: "var(--color-info-content)", "--input-color": "var(--color-info)" }, ".checkbox-success": { color: "var(--color-success-content)", "--input-color": "var(--color-success)" }, ".checkbox-warning": { color: "var(--color-warning-content)", "--input-color": "var(--color-warning)" }, ".checkbox-error": { color: "var(--color-error-content)", "--input-color": "var(--color-error)" }, ".checkbox:disabled": { cursor: "not-allowed", opacity: "20%" }, ".checkbox-xs": { padding: "0.125rem", "--size": "calc(var(--size-selector, 0.25rem) * 4)" }, ".checkbox-sm": { 
padding: "0.1875rem", "--size": "calc(var(--size-selector, 0.25rem) * 5)" }, ".checkbox-md": { padding: "0.25rem", "--size": "calc(var(--size-selector, 0.25rem) * 6)" }, ".checkbox-lg": { padding: "0.3125rem", "--size": "calc(var(--size-selector, 0.25rem) * 7)" }, ".checkbox-xl": { padding: "0.375rem", "--size": "calc(var(--size-selector, 0.25rem) * 8)" } }; + +// packages/daisyui/components/checkbox/index.js +var checkbox_default = ({ addComponents, prefix = "" }) => { + const prefixedcheckbox = addPrefix(object_default28, prefix); + addComponents({ ...prefixedcheckbox }); +}; + +// packages/daisyui/components/badge/object.js +var object_default29 = { ".badge": { display: "inline-flex", "align-items": "center", "justify-content": "center", gap: "calc(0.25rem * 2)", "border-radius": "var(--radius-selector)", "vertical-align": "middle", color: "var(--badge-fg)", border: "var(--border) solid var(--badge-color, var(--color-base-200))", "font-size": "0.875rem", width: "fit-content", "padding-inline": "calc(0.25rem * 3 - var(--border))", "background-size": "auto, calc(var(--noise) * 100%)", "background-image": "none, var(--fx-noise)", "background-color": "var(--badge-bg)", "--badge-bg": "var(--badge-color, var(--color-base-100))", "--badge-fg": "var(--color-base-content)", "--size": "calc(var(--size-selector, 0.25rem) * 6)", height: "var(--size)", "&.badge-outline": { "--badge-fg": "var(--badge-color)", "--badge-bg": "#0000", "background-image": "none", "border-color": "currentColor" }, "&.badge-dash": { "--badge-fg": "var(--badge-color)", "--badge-bg": "#0000", "background-image": "none", "border-color": "currentColor", "border-style": "dashed" }, "&.badge-soft": { color: "var(--badge-color, var(--color-base-content))", "background-color": "color-mix( in oklab, var(--badge-color, var(--color-base-content)) 8%, var(--color-base-100) )", "border-color": "color-mix( in oklab, var(--badge-color, var(--color-base-content)) 10%, var(--color-base-100) )", "background-image": "none" } }, ".badge-primary": { "--badge-color": "var(--color-primary)", "--badge-fg": "var(--color-primary-content)" }, ".badge-secondary": { "--badge-color": "var(--color-secondary)", "--badge-fg": "var(--color-secondary-content)" }, ".badge-accent": { "--badge-color": "var(--color-accent)", "--badge-fg": "var(--color-accent-content)" }, ".badge-neutral": { "--badge-color": "var(--color-neutral)", "--badge-fg": "var(--color-neutral-content)" }, ".badge-info": { "--badge-color": "var(--color-info)", "--badge-fg": "var(--color-info-content)" }, ".badge-success": { "--badge-color": "var(--color-success)", "--badge-fg": "var(--color-success-content)" }, ".badge-warning": { "--badge-color": "var(--color-warning)", "--badge-fg": "var(--color-warning-content)" }, ".badge-error": { "--badge-color": "var(--color-error)", "--badge-fg": "var(--color-error-content)" }, ".badge-ghost": { "border-color": "var(--color-base-200)", "background-color": "var(--color-base-200)", color: "var(--color-base-content)", "background-image": "none" }, ".badge-xs": { "--size": "calc(var(--size-selector, 0.25rem) * 4)", "font-size": "0.625rem", "padding-inline": "calc(0.25rem * 2 - var(--border))" }, ".badge-sm": { "--size": "calc(var(--size-selector, 0.25rem) * 5)", "font-size": "0.75rem", "padding-inline": "calc(0.25rem * 2.5 - var(--border))" }, ".badge-md": { "--size": "calc(var(--size-selector, 0.25rem) * 6)", "font-size": "0.875rem", "padding-inline": "calc(0.25rem * 3 - var(--border))" }, ".badge-lg": { "--size": "calc(var(--size-selector, 0.25rem) * 
7)", "font-size": "1rem", "padding-inline": "calc(0.25rem * 3.5 - var(--border))" }, ".badge-xl": { "--size": "calc(var(--size-selector, 0.25rem) * 8)", "font-size": "1.125rem", "padding-inline": "calc(0.25rem * 4 - var(--border))" } }; + +// packages/daisyui/components/badge/index.js +var badge_default = ({ addComponents, prefix = "" }) => { + const prefixedbadge = addPrefix(object_default29, prefix); + addComponents({ ...prefixedbadge }); +}; + +// packages/daisyui/components/status/object.js +var object_default30 = { ".status": { display: "inline-block", "aspect-ratio": "1 / 1", width: "calc(0.25rem * 2)", height: "calc(0.25rem * 2)", "border-radius": "var(--radius-selector)", "background-color": "color-mix(in oklab, var(--color-base-content) 20%, transparent)", "background-position": "center", "background-repeat": "no-repeat", "vertical-align": "middle", color: "color-mix(in srgb, #000 30%, transparent)", "@supports (color: color-mix(in lab, red, red))": { color: "color-mix(in oklab, var(--color-black) 30%, transparent)" }, "background-image": "radial-gradient( circle at 35% 30%, oklch(1 0 0 / calc(var(--depth) * 0.5)), #0000 )", "box-shadow": "0 2px 3px -1px color-mix(in oklab, currentColor calc(var(--depth) * 100%), #0000)" }, ".status-primary": { "background-color": "var(--color-primary)", color: "var(--color-primary)" }, ".status-secondary": { "background-color": "var(--color-secondary)", color: "var(--color-secondary)" }, ".status-accent": { "background-color": "var(--color-accent)", color: "var(--color-accent)" }, ".status-neutral": { "background-color": "var(--color-neutral)", color: "var(--color-neutral)" }, ".status-info": { "background-color": "var(--color-info)", color: "var(--color-info)" }, ".status-success": { "background-color": "var(--color-success)", color: "var(--color-success)" }, ".status-warning": { "background-color": "var(--color-warning)", color: "var(--color-warning)" }, ".status-error": { "background-color": "var(--color-error)", color: "var(--color-error)" }, ".status-xs": { width: "calc(0.25rem * 0.5)", height: "calc(0.25rem * 0.5)" }, ".status-sm": { width: "calc(0.25rem * 1)", height: "calc(0.25rem * 1)" }, ".status-md": { width: "calc(0.25rem * 2)", height: "calc(0.25rem * 2)" }, ".status-lg": { width: "calc(0.25rem * 3)", height: "calc(0.25rem * 3)" }, ".status-xl": { width: "calc(0.25rem * 4)", height: "calc(0.25rem * 4)" } }; + +// packages/daisyui/components/status/index.js +var status_default = ({ addComponents, prefix = "" }) => { + const prefixedstatus = addPrefix(object_default30, prefix); + addComponents({ ...prefixedstatus }); +}; + +// packages/daisyui/components/diff/object.js +var object_default31 = { ".diff": { position: "relative", display: "grid", width: "100%", overflow: "hidden", "webkit-user-select": "none", "user-select": "none", direction: "ltr", "container-type": "inline-size", "grid-template-columns": "auto 1fr", "&:focus-visible, &:has(.diff-item-1:focus-visible)": { "outline-style": "var(--tw-outline-style)", "outline-width": "2px", "outline-offset": "1px", "outline-color": "var(--color-base-content)" }, "&:focus-visible": { "outline-style": "var(--tw-outline-style)", "outline-width": "2px", "outline-offset": "1px", "outline-color": "var(--color-base-content)", ".diff-resizer": { "min-width": "90cqi", "max-width": "90cqi" } }, "&:has(.diff-item-2:focus-visible)": { "outline-style": "var(--tw-outline-style)", "outline-width": "2px", "outline-offset": "1px", ".diff-resizer": { "min-width": "10cqi", "max-width": "10cqi" } }, 
"@supports (-webkit-overflow-scrolling: touch) and (overflow: -webkit-paged-x)": { "&:focus": { ".diff-resizer": { "min-width": "10cqi", "max-width": "10cqi" } }, "&:has(.diff-item-1:focus)": { ".diff-resizer": { "min-width": "90cqi", "max-width": "90cqi" } } } }, ".diff-resizer": { position: "relative", top: "calc(1/2 * 100%)", "z-index": 1, "grid-column-start": "1", "grid-row-start": "1", height: "calc(0.25rem * 2)", width: "50cqi", "max-width": "calc(100cqi - 1rem)", "min-width": "1rem", resize: "horizontal", overflow: "hidden", opacity: "0%", transform: "scaleY(3) translate(0.35rem, 0.08rem)", cursor: "ew-resize", "transform-origin": "100% 100%", "clip-path": "inset(calc(100% - 0.75rem) 0 0 calc(100% - 0.75rem))", transition: "min-width 0.3s ease-out, max-width 0.3s ease-out" }, ".diff-item-2": { position: "relative", "grid-column-start": "1", "grid-row-start": "1", "&:after": { "pointer-events": "none", position: "absolute", top: "calc(1/2 * 100%)", right: "1px", bottom: "calc(0.25rem * 0)", "z-index": 2, "border-radius": "calc(infinity * 1px)", "background-color": "color-mix(in oklab, var(--color-base-100) 50%, transparent)", width: "1.2rem", height: "1.8rem", border: "2px solid var(--color-base-100)", content: '""', outline: "1px solid color-mix(in oklab, var(--color-base-content) 5%, #0000)", "outline-offset": "-3px", "backdrop-filter": "blur(8px)", "box-shadow": "0 1px 2px 0 oklch(0% 0 0 / 0.1)", translate: "50% -50%" }, "> *": { "pointer-events": "none", position: "absolute", top: "calc(0.25rem * 0)", bottom: "calc(0.25rem * 0)", left: "calc(0.25rem * 0)", height: "100%", width: "100cqi", "max-width": "none", "object-fit": "cover", "object-position": "center" }, "@supports (-webkit-overflow-scrolling: touch) and (overflow: -webkit-paged-x)": { "&:after": { content: "none" } } }, ".diff-item-1": { position: "relative", "z-index": 1, "grid-column-start": "1", "grid-row-start": "1", overflow: "hidden", "border-right": "2px solid var(--color-base-100)", "> *": { "pointer-events": "none", position: "absolute", top: "calc(0.25rem * 0)", bottom: "calc(0.25rem * 0)", left: "calc(0.25rem * 0)", height: "100%", width: "100cqi", "max-width": "none", "object-fit": "cover", "object-position": "center" } } }; + +// packages/daisyui/components/diff/index.js +var diff_default = ({ addComponents, prefix = "" }) => { + const prefixeddiff = addPrefix(object_default31, prefix); + addComponents({ ...prefixeddiff }); +}; + +// packages/daisyui/components/hero/object.js +var object_default32 = { ".hero": { display: "grid", width: "100%", "place-items": "center", "background-size": "cover", "background-position": "center", "& > *": { "grid-column-start": "1", "grid-row-start": "1" } }, ".hero-overlay": { "grid-column-start": "1", "grid-row-start": "1", height: "100%", width: "100%", "background-color": "color-mix(in oklab, var(--color-neutral) 50%, transparent)" }, ".hero-content": { isolation: "isolate", display: "flex", "max-width": "80rem", "align-items": "center", "justify-content": "center", gap: "calc(0.25rem * 4)", padding: "calc(0.25rem * 4)" } }; + +// packages/daisyui/components/hero/index.js +var hero_default = ({ addComponents, prefix = "" }) => { + const prefixedhero = addPrefix(object_default32, prefix); + addComponents({ ...prefixedhero }); +}; + +// packages/daisyui/components/toggle/object.js +var object_default33 = { ".toggle": { border: "var(--border) solid currentColor", color: "var(--input-color)", position: "relative", display: "inline-grid", "flex-shrink": 0, cursor: "pointer", 
appearance: "none", "place-content": "center", "vertical-align": "middle", "webkit-user-select": "none", "user-select": "none", "grid-template-columns": "0fr 1fr 1fr", "--radius-selector-max": `calc( + var(--radius-selector) + var(--radius-selector) + var(--radius-selector) + )`, "border-radius": "calc( var(--radius-selector) + min(var(--toggle-p), var(--radius-selector-max)) + min(var(--border), var(--radius-selector-max)) )", padding: "var(--toggle-p)", "box-shadow": "0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000) inset", transition: "color 0.3s, grid-template-columns 0.2s", "--input-color": "color-mix(in oklab, var(--color-base-content) 50%, #0000)", "--toggle-p": "calc(var(--size) * 0.125)", "--size": "calc(var(--size-selector, 0.25rem) * 6)", width: "calc((var(--size) * 2) - (var(--border) + var(--toggle-p)) * 2)", height: "var(--size)", "> *": { "z-index": 1, "grid-column": "span 1 / span 1", "grid-column-start": "2", "grid-row-start": "1", height: "100%", cursor: "pointer", appearance: "none", "background-color": "transparent", padding: "calc(0.25rem * 0.5)", transition: "opacity 0.2s, rotate 0.4s", border: "none", "&:focus": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } }, "&:nth-child(2)": { color: "var(--color-base-100)", rotate: "0deg" }, "&:nth-child(3)": { color: "var(--color-base-100)", opacity: "0%", rotate: "-15deg" } }, "&:has(:checked)": { "> :nth-child(2)": { opacity: "0%", rotate: "15deg" }, "> :nth-child(3)": { opacity: "100%", rotate: "0deg" } }, "&:before": { position: "relative", "inset-inline-start": "calc(0.25rem * 0)", "grid-column-start": "2", "grid-row-start": "1", "aspect-ratio": "1 / 1", height: "100%", "border-radius": "var(--radius-selector)", "background-color": "currentColor", translate: "0", "--tw-content": '""', content: "var(--tw-content)", transition: "background-color 0.1s, translate 0.2s, inset-inline-start 0.2s", "box-shadow": "0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000)", "background-size": "auto, calc(var(--noise) * 100%)", "background-image": "none, var(--fx-noise)" }, "@media (forced-colors: active)": { "&:before": { "outline-style": "var(--tw-outline-style)", "outline-width": "1px", "outline-offset": "calc(1px * -1)" } }, "@media print": { "&:before": { outline: "0.25rem solid", "outline-offset": "-1rem" } }, "&:focus-visible, &:has(:focus-visible)": { outline: "2px solid currentColor", "outline-offset": "2px" }, '&:checked, &[aria-checked="true"], &:has(> input:checked)': { "grid-template-columns": "1fr 1fr 0fr", "background-color": "var(--color-base-100)", "--input-color": "var(--color-base-content)", "&:before": { "background-color": "currentColor" }, "@starting-style": { "&:before": { opacity: 0 } } }, "&:indeterminate": { "grid-template-columns": "0.5fr 1fr 0.5fr" }, "&:disabled": { cursor: "not-allowed", opacity: "30%", "&:before": { "background-color": "transparent", border: "var(--border) solid currentColor" } } }, ".toggle-primary": { '&:checked, &[aria-checked="true"]': { "--input-color": "var(--color-primary)" } }, ".toggle-secondary": { '&:checked, &[aria-checked="true"]': { "--input-color": "var(--color-secondary)" } }, ".toggle-accent": { '&:checked, &[aria-checked="true"]': { "--input-color": "var(--color-accent)" } }, ".toggle-neutral": { '&:checked, 
&[aria-checked="true"]': { "--input-color": "var(--color-neutral)" } }, ".toggle-success": { '&:checked, &[aria-checked="true"]': { "--input-color": "var(--color-success)" } }, ".toggle-warning": { '&:checked, &[aria-checked="true"]': { "--input-color": "var(--color-warning)" } }, ".toggle-info": { '&:checked, &[aria-checked="true"]': { "--input-color": "var(--color-info)" } }, ".toggle-error": { '&:checked, &[aria-checked="true"]': { "--input-color": "var(--color-error)" } }, ".toggle-xs": { '&:is([type="checkbox"]), &:has([type="checkbox"])': { "--size": "calc(var(--size-selector, 0.25rem) * 4)" } }, ".toggle-sm": { '&:is([type="checkbox"]), &:has([type="checkbox"])': { "--size": "calc(var(--size-selector, 0.25rem) * 5)" } }, ".toggle-md": { '&:is([type="checkbox"]), &:has([type="checkbox"])': { "--size": "calc(var(--size-selector, 0.25rem) * 6)" } }, ".toggle-lg": { '&:is([type="checkbox"]), &:has([type="checkbox"])': { "--size": "calc(var(--size-selector, 0.25rem) * 7)" } }, ".toggle-xl": { '&:is([type="checkbox"]), &:has([type="checkbox"])': { "--size": "calc(var(--size-selector, 0.25rem) * 8)" } } }; + +// packages/daisyui/components/toggle/index.js +var toggle_default = ({ addComponents, prefix = "" }) => { + const prefixedtoggle = addPrefix(object_default33, prefix); + addComponents({ ...prefixedtoggle }); +}; + +// packages/daisyui/components/stack/object.js +var object_default34 = { ".stack": { display: "inline-grid", "grid-template-columns": "3px 4px 1fr 4px 3px", "grid-template-rows": "3px 4px 1fr 4px 3px", "& > *": { height: "100%", width: "100%", "&:nth-child(n + 2)": { width: "100%", opacity: "70%" }, "&:nth-child(2)": { "z-index": 2, opacity: "90%" }, "&:nth-child(1)": { "z-index": 3, width: "100%" } }, "&, &.stack-bottom": { "> *": { "grid-column": "3 / 4", "grid-row": "3 / 6", "&:nth-child(2)": { "grid-column": "2 / 5", "grid-row": "2 / 5" }, "&:nth-child(1)": { "grid-column": "1 / 6", "grid-row": "1 / 4" } } }, "&.stack-top": { "> *": { "grid-column": "3 / 4", "grid-row": "1 / 4", "&:nth-child(2)": { "grid-column": "2 / 5", "grid-row": "2 / 5" }, "&:nth-child(1)": { "grid-column": "1 / 6", "grid-row": "3 / 6" } } }, "&.stack-start": { "> *": { "grid-column": "1 / 4", "grid-row": "3 / 4", "&:nth-child(2)": { "grid-column": "2 / 5", "grid-row": "2 / 5" }, "&:nth-child(1)": { "grid-column": "3 / 6", "grid-row": "1 / 6" } } }, "&.stack-end": { "> *": { "grid-column": "3 / 6", "grid-row": "3 / 4", "&:nth-child(2)": { "grid-column": "2 / 5", "grid-row": "2 / 5" }, "&:nth-child(1)": { "grid-column": "1 / 4", "grid-row": "1 / 6" } } } } }; + +// packages/daisyui/components/stack/index.js +var stack_default = ({ addComponents, prefix = "" }) => { + const prefixedstack = addPrefix(object_default34, prefix); + addComponents({ ...prefixedstack }); +}; + +// packages/daisyui/components/navbar/object.js +var object_default35 = { ".navbar": { display: "flex", width: "100%", "align-items": "center", padding: "0.5rem", "min-height": "4rem" }, ".navbar-start": { display: "inline-flex", "align-items": "center", width: "50%", "justify-content": "flex-start" }, ".navbar-center": { display: "inline-flex", "align-items": "center", "flex-shrink": 0 }, ".navbar-end": { display: "inline-flex", "align-items": "center", width: "50%", "justify-content": "flex-end" } }; + +// packages/daisyui/components/navbar/index.js +var navbar_default = ({ addComponents, prefix = "" }) => { + const prefixednavbar = addPrefix(object_default35, prefix); + addComponents({ ...prefixednavbar }); +}; + +// 
packages/daisyui/components/label/object.js +var object_default36 = { ".label": { display: "inline-flex", "align-items": "center", gap: "calc(0.25rem * 1.5)", "white-space": "nowrap", color: "color-mix(in oklab, currentColor 60%, transparent)", "&:has(input)": { cursor: "pointer" }, "&:is(.input > *, .select > *)": { display: "flex", height: "calc(100% - 0.5rem)", "align-items": "center", "padding-inline": "calc(0.25rem * 3)", "white-space": "nowrap", "font-size": "inherit", "&:first-child": { "margin-inline-start": "calc(0.25rem * -3)", "margin-inline-end": "calc(0.25rem * 3)", "border-inline-end": "var(--border) solid color-mix(in oklab, currentColor 10%, #0000)" }, "&:last-child": { "margin-inline-start": "calc(0.25rem * 3)", "margin-inline-end": "calc(0.25rem * -3)", "border-inline-start": "var(--border) solid color-mix(in oklab, currentColor 10%, #0000)" } } }, ".floating-label": { position: "relative", display: "block", input: { display: "block", "&::placeholder": { transition: "top 0.1s ease-out, translate 0.1s ease-out, scale 0.1s ease-out, opacity 0.1s ease-out" } }, textarea: { "&::placeholder": { transition: "top 0.1s ease-out, translate 0.1s ease-out, scale 0.1s ease-out, opacity 0.1s ease-out" } }, "> span": { position: "absolute", "inset-inline-start": "calc(0.25rem * 3)", "z-index": 1, "background-color": "var(--color-base-100)", "padding-inline": "calc(0.25rem * 1)", opacity: "0%", "font-size": "0.875rem", top: "calc(var(--size-field, 0.25rem) * 10 / 2)", "line-height": 1, "border-radius": "2px", "pointer-events": "none", translate: "0 -50%", transition: "top 0.1s ease-out, translate 0.1s ease-out, scale 0.1s ease-out, opacity 0.1s ease-out" }, "&:focus-within, &:not(:has(input:placeholder-shown, textarea:placeholder-shown))": { "::placeholder": { opacity: "0%", top: "0", translate: "-12.5% calc(-50% - 0.125em)", scale: "0.75", "pointer-events": "auto" }, "> span": { opacity: "100%", top: "0", translate: "-12.5% calc(-50% - 0.125em)", scale: "0.75", "pointer-events": "auto", "z-index": 2 } }, "&:has(:disabled, [disabled])": { "> span": { opacity: "0%" } }, "&:has(.input-xs, .select-xs, .textarea-xs) span": { "font-size": "0.6875rem", top: "calc(var(--size-field, 0.25rem) * 6 / 2)" }, "&:has(.input-sm, .select-sm, .textarea-sm) span": { "font-size": "0.75rem", top: "calc(var(--size-field, 0.25rem) * 8 / 2)" }, "&:has(.input-md, .select-md, .textarea-md) span": { "font-size": "0.875rem", top: "calc(var(--size-field, 0.25rem) * 10 / 2)" }, "&:has(.input-lg, .select-lg, .textarea-lg) span": { "font-size": "1.125rem", top: "calc(var(--size-field, 0.25rem) * 12 / 2)" }, "&:has(.input-xl, .select-xl, .textarea-xl) span": { "font-size": "1.375rem", top: "calc(var(--size-field, 0.25rem) * 14 / 2)" } } }; + +// packages/daisyui/components/label/index.js +var label_default = ({ addComponents, prefix = "" }) => { + const prefixedlabel = addPrefix(object_default36, prefix); + addComponents({ ...prefixedlabel }); +}; + +// packages/daisyui/components/menu/object.js +var object_default37 = { ".menu": { display: "flex", width: "fit-content", "flex-direction": "column", "flex-wrap": "wrap", padding: "calc(0.25rem * 2)", "--menu-active-fg": "var(--color-neutral-content)", "--menu-active-bg": "var(--color-neutral)", "font-size": "0.875rem", ":where(li ul)": { position: "relative", "margin-inline-start": "calc(0.25rem * 4)", "padding-inline-start": "calc(0.25rem * 2)", "white-space": "nowrap", "&:before": { position: "absolute", "inset-inline-start": "calc(0.25rem * 0)", top: "calc(0.25rem * 
3)", bottom: "calc(0.25rem * 3)", "background-color": "var(--color-base-content)", opacity: "10%", width: "var(--border)", content: '""' } }, ":where(li > .menu-dropdown:not(.menu-dropdown-show))": { display: "none" }, ":where(li:not(.menu-title) > *:not(ul, details, .menu-title, .btn)), :where(li:not(.menu-title) > details > summary:not(.menu-title))": { display: "grid", "grid-auto-flow": "column", "align-content": "flex-start", "align-items": "center", gap: "calc(0.25rem * 2)", "border-radius": "var(--radius-field)", "padding-inline": "calc(0.25rem * 3)", "padding-block": "calc(0.25rem * 1.5)", "text-align": "start", "transition-property": "color, background-color, box-shadow", "transition-duration": "0.2s", "transition-timing-function": "cubic-bezier(0, 0, 0.2, 1)", "grid-auto-columns": "minmax(auto, max-content) auto max-content", "text-wrap": "balance", "user-select": "none" }, ":where(li > details > summary)": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" }, "&::-webkit-details-marker": { display: "none" } }, ":where(li > details > summary), :where(li > .menu-dropdown-toggle)": { "&:after": { "justify-self": "flex-end", display: "block", height: "0.375rem", width: "0.375rem", rotate: "-135deg", translate: "0 -1px", "transition-property": "rotate, translate", "transition-duration": "0.2s", content: '""', "transform-origin": "50% 50%", "box-shadow": "2px 2px inset", "pointer-events": "none" } }, ":where(li > details[open] > summary):after, :where(li > .menu-dropdown-toggle.menu-dropdown-show):after": { rotate: "45deg", translate: "0 1px" }, ":where( li:not(.menu-title, .disabled) > *:not(ul, details, .menu-title), li:not(.menu-title, .disabled) > details > summary:not(.menu-title) ):not(.menu-active, :active, .btn)": { "&.menu-focus, &:focus-visible": { cursor: "pointer", "background-color": "color-mix(in oklab, var(--color-base-content) 10%, transparent)", color: "var(--color-base-content)", "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } } }, ":where( li:not(.menu-title, .disabled) > *:not(ul, details, .menu-title):not(.menu-active, :active, .btn):hover, li:not(.menu-title, .disabled) > details > summary:not(.menu-title):not(.menu-active, :active, .btn):hover )": { cursor: "pointer", "background-color": "color-mix(in oklab, var(--color-base-content) 10%, transparent)", "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" }, "box-shadow": "0 1px oklch(0% 0 0 / 0.01) inset, 0 -1px oklch(100% 0 0 / 0.01) inset" }, ":where(li:empty)": { "background-color": "var(--color-base-content)", opacity: "10%", margin: "0.5rem 1rem", height: "1px" }, ":where(li)": { position: "relative", display: "flex", "flex-shrink": 0, "flex-direction": "column", "flex-wrap": "wrap", "align-items": "stretch", ".badge": { "justify-self": "flex-end" }, "& > *:not(ul, .menu-title, details, .btn):active, & > *:not(ul, .menu-title, details, .btn).menu-active, & > details > summary:active": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" }, color: "var(--menu-active-fg)", "background-color": "var(--menu-active-bg)", "background-size": "auto, calc(var(--noise) * 100%)", "background-image": "none, var(--fx-noise)", 
"&:not(&:active)": { "box-shadow": "0 2px calc(var(--depth) * 3px) -2px var(--menu-active-bg)" } }, "&.menu-disabled": { "pointer-events": "none", color: "color-mix(in oklab, var(--color-base-content) 20%, transparent)" } }, ".dropdown:focus-within": { ".menu-dropdown-toggle:after": { rotate: "45deg", translate: "0 1px" } }, ".dropdown-content": { "margin-top": "calc(0.25rem * 2)", padding: "calc(0.25rem * 2)", "&:before": { display: "none" } } }, ".menu-title": { "padding-inline": "calc(0.25rem * 3)", "padding-block": "calc(0.25rem * 2)", color: "color-mix(in oklab, var(--color-base-content) 40%, transparent)", "font-size": "0.875rem", "font-weight": 600 }, ".menu-horizontal": { display: "inline-flex", "flex-direction": "row", "& > li:not(.menu-title) > details > ul": { position: "absolute", "margin-inline-start": "calc(0.25rem * 0)", "margin-top": "calc(0.25rem * 4)", "padding-block": "calc(0.25rem * 2)", "padding-inline-end": "calc(0.25rem * 2)" }, "& > li > details > ul": { "&:before": { content: "none" } }, ":where(& > li:not(.menu-title) > details > ul)": { "border-radius": "var(--radius-box)", "background-color": "var(--color-base-100)", "box-shadow": "0 1px 3px 0 oklch(0% 0 0/0.1), 0 1px 2px -1px oklch(0% 0 0/0.1)" } }, ".menu-vertical": { display: "inline-flex", "flex-direction": "column", "& > li:not(.menu-title) > details > ul": { position: "relative", "margin-inline-start": "calc(0.25rem * 4)", "margin-top": "calc(0.25rem * 0)", "padding-block": "calc(0.25rem * 0)", "padding-inline-end": "calc(0.25rem * 0)" } }, ".menu-xs": { ":where(li:not(.menu-title) > *:not(ul, details, .menu-title)), :where(li:not(.menu-title) > details > summary:not(.menu-title))": { "border-radius": "var(--radius-field)", "padding-inline": "calc(0.25rem * 2)", "padding-block": "calc(0.25rem * 1)", "font-size": "0.6875rem" }, ".menu-title": { "padding-inline": "calc(0.25rem * 2)", "padding-block": "calc(0.25rem * 1)" } }, ".menu-sm": { ":where(li:not(.menu-title) > *:not(ul, details, .menu-title)), :where(li:not(.menu-title) > details > summary:not(.menu-title))": { "border-radius": "var(--radius-field)", "padding-inline": "calc(0.25rem * 2.5)", "padding-block": "calc(0.25rem * 1)", "font-size": "0.75rem" }, ".menu-title": { "padding-inline": "calc(0.25rem * 3)", "padding-block": "calc(0.25rem * 2)" } }, ".menu-md": { ":where(li:not(.menu-title) > *:not(ul, details, .menu-title)), :where(li:not(.menu-title) > details > summary:not(.menu-title))": { "border-radius": "var(--radius-field)", "padding-inline": "calc(0.25rem * 3)", "padding-block": "calc(0.25rem * 1.5)", "font-size": "0.875rem" }, ".menu-title": { "padding-inline": "calc(0.25rem * 3)", "padding-block": "calc(0.25rem * 2)" } }, ".menu-lg": { ":where(li:not(.menu-title) > *:not(ul, details, .menu-title)), :where(li:not(.menu-title) > details > summary:not(.menu-title))": { "border-radius": "var(--radius-field)", "padding-inline": "calc(0.25rem * 4)", "padding-block": "calc(0.25rem * 1.5)", "font-size": "1.125rem" }, ".menu-title": { "padding-inline": "calc(0.25rem * 6)", "padding-block": "calc(0.25rem * 3)" } }, ".menu-xl": { ":where(li:not(.menu-title) > *:not(ul, details, .menu-title)), :where(li:not(.menu-title) > details > summary:not(.menu-title))": { "border-radius": "var(--radius-field)", "padding-inline": "calc(0.25rem * 5)", "padding-block": "calc(0.25rem * 1.5)", "font-size": "1.375rem" }, ".menu-title": { "padding-inline": "calc(0.25rem * 6)", "padding-block": "calc(0.25rem * 3)" } } }; + +// packages/daisyui/components/menu/index.js 
+var menu_default = ({ addComponents, prefix = "" }) => { + const prefixedmenu = addPrefix(object_default37, prefix); + addComponents({ ...prefixedmenu }); +}; + +// packages/daisyui/components/toast/object.js +var object_default38 = { ".toast": { position: "fixed", "inset-inline-start": "auto", "inset-inline-end": "calc(0.25rem * 4)", top: "auto", bottom: "calc(0.25rem * 4)", display: "flex", "flex-direction": "column", gap: "calc(0.25rem * 2)", "background-color": "transparent", translate: "var(--toast-x, 0) var(--toast-y, 0)", width: "max-content", "max-width": "calc(100vw - 2rem)", "& > *": { animation: "toast 0.25s ease-out" }, "&:where(.toast-start)": { "inset-inline-start": "calc(0.25rem * 4)", "inset-inline-end": "auto", "--toast-x": "0" }, "&:where(.toast-center)": { "inset-inline-start": "calc(1/2 * 100%)", "inset-inline-end": "calc(1/2 * 100%)", "--toast-x": "-50%" }, "&:where(.toast-end)": { "inset-inline-start": "auto", "inset-inline-end": "calc(0.25rem * 4)", "--toast-x": "0" }, "&:where(.toast-bottom)": { top: "auto", bottom: "calc(0.25rem * 4)", "--toast-y": "0" }, "&:where(.toast-middle)": { top: "calc(1/2 * 100%)", bottom: "auto", "--toast-y": "-50%" }, "&:where(.toast-top)": { top: "calc(0.25rem * 4)", bottom: "auto", "--toast-y": "0" } }, "@keyframes toast": { "0%": { scale: "0.9", opacity: 0 }, "100%": { scale: "1", opacity: 1 } } }; + +// packages/daisyui/components/toast/index.js +var toast_default = ({ addComponents, prefix = "" }) => { + const prefixedtoast = addPrefix(object_default38, prefix); + addComponents({ ...prefixedtoast }); +}; + +// packages/daisyui/components/button/object.js +var object_default39 = { ":where(.btn)": { width: "unset" }, ".btn": { display: "inline-flex", "flex-shrink": 0, cursor: "pointer", "flex-wrap": "nowrap", "align-items": "center", "justify-content": "center", gap: "calc(0.25rem * 1.5)", "text-align": "center", "vertical-align": "middle", "outline-offset": "2px", "webkit-user-select": "none", "user-select": "none", "padding-inline": "var(--btn-p)", color: "var(--btn-fg)", "--tw-prose-links": "var(--btn-fg)", height: "var(--size)", "font-size": "var(--fontsize, 0.875rem)", "font-weight": 600, "outline-color": "var(--btn-color, var(--color-base-content))", "transition-property": "color, background-color, border-color, box-shadow", "transition-timing-function": "cubic-bezier(0, 0, 0.2, 1)", "transition-duration": "0.2s", "border-start-start-radius": "var(--join-ss, var(--radius-field))", "border-start-end-radius": "var(--join-se, var(--radius-field))", "border-end-start-radius": "var(--join-es, var(--radius-field))", "border-end-end-radius": "var(--join-ee, var(--radius-field))", "background-color": "var(--btn-bg)", "background-size": "auto, calc(var(--noise) * 100%)", "background-image": "none, var(--btn-noise)", "border-width": "var(--border)", "border-style": "solid", "border-color": "var(--btn-border)", "text-shadow": "0 0.5px oklch(100% 0 0 / calc(var(--depth) * 0.15))", "touch-action": "manipulation", "box-shadow": "0 0.5px 0 0.5px oklch(100% 0 0 / calc(var(--depth) * 6%)) inset, var(--btn-shadow)", "--size": "calc(var(--size-field, 0.25rem) * 10)", "--btn-bg": "var(--btn-color, var(--color-base-200))", "--btn-fg": "var(--color-base-content)", "--btn-p": "1rem", "--btn-border": "color-mix(in oklab, var(--btn-bg), #000 calc(var(--depth) * 5%))", "--btn-shadow": `0 3px 2px -2px color-mix(in oklab, var(--btn-bg) calc(var(--depth) * 30%), #0000), + 0 4px 3px -2px color-mix(in oklab, var(--btn-bg) calc(var(--depth) * 30%), #0000)`, 
"--btn-noise": "var(--fx-noise)", ".prose &": { "text-decoration-line": "none" }, "@media (hover: hover)": { "&:hover": { "--btn-bg": "color-mix(in oklab, var(--btn-color, var(--color-base-200)), #000 7%)" } }, "&:focus-visible": { "outline-width": "2px", "outline-style": "solid", isolation: "isolate" }, "&:active:not(.btn-active)": { translate: "0 0.5px", "--btn-bg": "color-mix(in oklab, var(--btn-color, var(--color-base-200)), #000 5%)", "--btn-border": "color-mix(in oklab, var(--btn-color, var(--color-base-200)), #000 7%)", "--btn-shadow": "0 0 0 0 oklch(0% 0 0/0), 0 0 0 0 oklch(0% 0 0/0)" }, "&:is(:disabled, [disabled], .btn-disabled)": { "&:not(.btn-link, .btn-ghost)": { "background-color": "color-mix(in oklab, var(--color-base-content) 10%, transparent)", "box-shadow": "none" }, "pointer-events": "none", "--btn-border": "#0000", "--btn-noise": "none", "--btn-fg": "color-mix(in oklch, var(--color-base-content) 20%, #0000)", "@media (hover: hover)": { "&:hover": { "pointer-events": "none", "background-color": "color-mix(in oklab, var(--color-neutral) 20%, transparent)", "--btn-border": "#0000", "--btn-fg": "color-mix(in oklch, var(--color-base-content) 20%, #0000)" } } }, '&:is(input[type="checkbox"], input[type="radio"])': { appearance: "none", "&::after": { content: "attr(aria-label)" } }, "&:where(input:checked:not(.filter .btn))": { "--btn-color": "var(--color-primary)", "--btn-fg": "var(--color-primary-content)", isolation: "isolate" } }, ".btn-active": { "--btn-bg": "color-mix(in oklab, var(--btn-color, var(--color-base-200)), #000 7%)", "--btn-shadow": "0 0 0 0 oklch(0% 0 0/0), 0 0 0 0 oklch(0% 0 0/0)", isolation: "isolate" }, ".btn-primary": { "--btn-color": "var(--color-primary)", "--btn-fg": "var(--color-primary-content)" }, ".btn-secondary": { "--btn-color": "var(--color-secondary)", "--btn-fg": "var(--color-secondary-content)" }, ".btn-accent": { "--btn-color": "var(--color-accent)", "--btn-fg": "var(--color-accent-content)" }, ".btn-neutral": { "--btn-color": "var(--color-neutral)", "--btn-fg": "var(--color-neutral-content)" }, ".btn-info": { "--btn-color": "var(--color-info)", "--btn-fg": "var(--color-info-content)" }, ".btn-success": { "--btn-color": "var(--color-success)", "--btn-fg": "var(--color-success-content)" }, ".btn-warning": { "--btn-color": "var(--color-warning)", "--btn-fg": "var(--color-warning-content)" }, ".btn-error": { "--btn-color": "var(--color-error)", "--btn-fg": "var(--color-error-content)" }, ".btn-ghost": { "&:not(.btn-active, :hover, :active:focus, :focus-visible)": { "--btn-shadow": '""', "--btn-bg": "#0000", "--btn-border": "#0000", "--btn-noise": "none", "&:not(:disabled, [disabled], .btn-disabled)": { "outline-color": "currentColor", "--btn-fg": "currentColor" } } }, ".btn-link": { "text-decoration-line": "underline", "outline-color": "currentColor", "--btn-border": "#0000", "--btn-bg": "#0000", "--btn-fg": "var(--color-primary)", "--btn-noise": "none", "--btn-shadow": '""', "&:is(.btn-active, :hover, :active:focus, :focus-visible)": { "text-decoration-line": "underline", "--btn-border": "#0000", "--btn-bg": "#0000" } }, ".btn-outline": { "&:not( .btn-active, :hover, :active:focus, :focus-visible, :disabled, [disabled], .btn-disabled, :checked )": { "--btn-shadow": '""', "--btn-bg": "#0000", "--btn-fg": "var(--btn-color)", "--btn-border": "var(--btn-color)", "--btn-noise": "none" }, "@media (hover: none)": { "&:hover:not( .btn-active, :active, :focus-visible, :disabled, [disabled], .btn-disabled, :checked )": { "--btn-shadow": '""', 
"--btn-bg": "#0000", "--btn-fg": "var(--btn-color)", "--btn-border": "var(--btn-color)", "--btn-noise": "none" } } }, ".btn-dash": { "&:not( .btn-active, :hover, :active:focus, :focus-visible, :disabled, [disabled], .btn-disabled, :checked )": { "--btn-shadow": '""', "border-style": "dashed", "--btn-bg": "#0000", "--btn-fg": "var(--btn-color)", "--btn-border": "var(--btn-color)", "--btn-noise": "none" }, "@media (hover: none)": { "&:hover:not( .btn-active, :active, :focus-visible, :disabled, [disabled], .btn-disabled, :checked )": { "--btn-shadow": '""', "border-style": "dashed", "--btn-bg": "#0000", "--btn-fg": "var(--btn-color)", "--btn-border": "var(--btn-color)", "--btn-noise": "none" } } }, ".btn-soft": { "&:not(.btn-active, :hover, :active:focus, :focus-visible, :disabled, [disabled], .btn-disabled)": { "--btn-shadow": '""', "--btn-fg": "var(--btn-color, var(--color-base-content))", "--btn-bg": `color-mix( + in oklab, + var(--btn-color, var(--color-base-content)) 8%, + var(--color-base-100) + )`, "--btn-border": `color-mix( + in oklab, + var(--btn-color, var(--color-base-content)) 10%, + var(--color-base-100) + )`, "--btn-noise": "none" }, "@media (hover: none)": { "&:hover:not(.btn-active, :active, :focus-visible, :disabled, [disabled], .btn-disabled)": { "--btn-shadow": '""', "--btn-fg": "var(--btn-color, var(--color-base-content))", "--btn-bg": `color-mix( + in oklab, + var(--btn-color, var(--color-base-content)) 8%, + var(--color-base-100) + )`, "--btn-border": `color-mix( + in oklab, + var(--btn-color, var(--color-base-content)) 10%, + var(--color-base-100) + )`, "--btn-noise": "none" } } }, ".btn-xs": { "--fontsize": "0.6875rem", "--btn-p": "0.5rem", "--size": "calc(var(--size-field, 0.25rem) * 6)" }, ".btn-sm": { "--fontsize": "0.75rem", "--btn-p": "0.75rem", "--size": "calc(var(--size-field, 0.25rem) * 8)" }, ".btn-md": { "--fontsize": "0.875rem", "--btn-p": "1rem", "--size": "calc(var(--size-field, 0.25rem) * 10)" }, ".btn-lg": { "--fontsize": "1.125rem", "--btn-p": "1.25rem", "--size": "calc(var(--size-field, 0.25rem) * 12)" }, ".btn-xl": { "--fontsize": "1.375rem", "--btn-p": "1.5rem", "--size": "calc(var(--size-field, 0.25rem) * 14)" }, ".btn-square": { "padding-inline": "calc(0.25rem * 0)", width: "var(--size)", height: "var(--size)" }, ".btn-circle": { "border-radius": "calc(infinity * 1px)", "padding-inline": "calc(0.25rem * 0)", width: "var(--size)", height: "var(--size)" }, ".btn-wide": { width: "100%", "max-width": "calc(0.25rem * 64)" }, ".btn-block": { width: "100%" } }; + +// packages/daisyui/components/button/index.js +var button_default = ({ addComponents, prefix = "" }) => { + const prefixedbutton = addPrefix(object_default39, prefix); + addComponents({ ...prefixedbutton }); +}; + +// packages/daisyui/components/list/object.js +var object_default40 = { ".list": { display: "flex", "flex-direction": "column", "font-size": "0.875rem", ":where(.list-row)": { "--list-grid-cols": "minmax(0, auto) 1fr", position: "relative", display: "grid", "grid-auto-flow": "column", gap: "calc(0.25rem * 4)", "border-radius": "var(--radius-box)", padding: "calc(0.25rem * 4)", "word-break": "break-word", "grid-template-columns": "var(--list-grid-cols)", "&:has(.list-col-grow:nth-child(1))": { "--list-grid-cols": "1fr" }, "&:has(.list-col-grow:nth-child(2))": { "--list-grid-cols": "minmax(0, auto) 1fr" }, "&:has(.list-col-grow:nth-child(3))": { "--list-grid-cols": "minmax(0, auto) minmax(0, auto) 1fr" }, "&:has(.list-col-grow:nth-child(4))": { "--list-grid-cols": "minmax(0, auto) 
minmax(0, auto) minmax(0, auto) 1fr" }, "&:has(.list-col-grow:nth-child(5))": { "--list-grid-cols": "minmax(0, auto) minmax(0, auto) minmax(0, auto) minmax(0, auto) 1fr" }, "&:has(.list-col-grow:nth-child(6))": { "--list-grid-cols": `minmax(0, auto) minmax(0, auto) minmax(0, auto) minmax(0, auto) + minmax(0, auto) 1fr` }, ":not(.list-col-wrap)": { "grid-row-start": "1" } }, "& > :not(:last-child)": { "&.list-row, .list-row": { "&:after": { content: '""', "border-bottom": "var(--border) solid", "inset-inline": "var(--radius-box)", position: "absolute", bottom: "calc(0.25rem * 0)", "border-color": "color-mix(in oklab, var(--color-base-content) 5%, transparent)" } } } }, ".list-col-wrap": { "grid-row-start": "2" } }; + +// packages/daisyui/components/list/index.js +var list_default = ({ addComponents, prefix = "" }) => { + const prefixedlist = addPrefix(object_default40, prefix); + addComponents({ ...prefixedlist }); +}; + +// packages/daisyui/components/mockup/object.js +var object_default41 = { ".mockup-code": { position: "relative", overflow: "hidden", "overflow-x": "auto", "border-radius": "var(--radius-box)", "background-color": "var(--color-neutral)", "padding-block": "calc(0.25rem * 5)", color: "var(--color-neutral-content)", "font-size": "0.875rem", direction: "ltr", "&:before": { content: '""', "margin-bottom": "calc(0.25rem * 4)", display: "block", height: "calc(0.25rem * 3)", width: "calc(0.25rem * 3)", "border-radius": "calc(infinity * 1px)", opacity: "30%", "box-shadow": "1.4em 0, 2.8em 0, 4.2em 0" }, pre: { "padding-right": "calc(0.25rem * 5)", "&:before": { content: '""', "margin-right": "2ch" }, "&[data-prefix]": { "&:before": { content: "attr(data-prefix)", display: "inline-block", width: "calc(0.25rem * 8)", "text-align": "right", opacity: "50%" } } } }, ".mockup-window": { position: "relative", display: "flex", "flex-direction": "column", overflow: "hidden", "overflow-x": "auto", "border-radius": "var(--radius-box)", "padding-top": "calc(0.25rem * 5)", "&:before": { content: '""', "margin-bottom": "calc(0.25rem * 4)", display: "block", "aspect-ratio": "1 / 1", height: "calc(0.25rem * 3)", "flex-shrink": 0, "align-self": "flex-start", "border-radius": "calc(infinity * 1px)", opacity: "30%", "box-shadow": "1.4em 0, 2.8em 0, 4.2em 0" }, '[dir="rtl"] &:before': { "align-self": "flex-end" }, "pre[data-prefix]": { "&:before": { content: "attr(data-prefix)", display: "inline-block", "text-align": "right" } } }, ".mockup-browser": { position: "relative", overflow: "hidden", "overflow-x": "auto", "border-radius": "var(--radius-box)", "pre[data-prefix]": { "&:before": { content: "attr(data-prefix)", display: "inline-block", "text-align": "right" } }, ".mockup-browser-toolbar": { "margin-block": "calc(0.25rem * 3)", display: "inline-flex", width: "100%", "align-items": "center", "padding-right": "1.4em", '&:where(:dir(rtl), [dir="rtl"], [dir="rtl"] *)': { "flex-direction": "row-reverse" }, "&:before": { content: '""', "margin-right": "4.8rem", display: "inline-block", "aspect-ratio": "1 / 1", height: "calc(0.25rem * 3)", "border-radius": "calc(infinity * 1px)", opacity: "30%", "box-shadow": "1.4em 0, 2.8em 0, 4.2em 0" }, ".input": { "margin-inline": "auto", display: "flex", height: "100%", "align-items": "center", gap: "calc(0.25rem * 2)", overflow: "hidden", "background-color": "var(--color-base-200)", "text-overflow": "ellipsis", "white-space": "nowrap", "font-size": "0.75rem", direction: "ltr", "&:before": { content: '""', width: "calc(0.25rem * 4)", height: "calc(0.25rem * 4)", 
opacity: "30%", "background-image": `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='currentColor' class='size-4'%3E%3Cpath fill-rule='evenodd' d='M9.965 11.026a5 5 0 1 1 1.06-1.06l2.755 2.754a.75.75 0 1 1-1.06 1.06l-2.755-2.754ZM10.5 7a3.5 3.5 0 1 1-7 0 3.5 3.5 0 0 1 7 0Z' clip-rule='evenodd' /%3E%3C/svg%3E%0A")` } } } }, ".mockup-phone": { display: "inline-grid", "justify-items": "center", border: "6px solid #6b6b6b", "border-radius": "65px", "background-color": "#000", padding: "11px", overflow: "hidden" }, ".mockup-phone-camera": { "grid-column": "1/1", "grid-row": "1/1", background: "#000", height: "32px", width: "126px", "border-radius": "17px", "z-index": 1, "margin-top": "6px" }, ".mockup-phone-display": { "grid-column": "1/1", "grid-row": "1/1", overflow: "hidden", "border-radius": "49px", width: "390px", height: "845px" } }; + +// packages/daisyui/components/mockup/index.js +var mockup_default = ({ addComponents, prefix = "" }) => { + const prefixedmockup = addPrefix(object_default41, prefix); + addComponents({ ...prefixedmockup }); +}; + +// packages/daisyui/components/calendar/object.js +var object_default42 = { ".cally": { "font-size": "0.7rem", "&::part(container)": { padding: "0.5rem 1rem", "user-select": "none" }, "::part(th)": { "font-weight": "normal", "block-size": "auto" }, "&::part(header)": { direction: "ltr" }, "::part(head)": { opacity: 0.5, "font-size": "0.7rem" }, "&::part(button)": { "border-radius": "var(--radius-field)", border: "none", padding: "0.5rem", background: "#0000" }, "&::part(button):hover": { background: "var(--color-base-200)" }, "::part(day)": { "border-radius": "var(--radius-field)", "font-size": "0.7rem" }, "::part(button day today)": { background: "var(--color-primary)", color: "var(--color-primary-content)" }, "::part(selected)": { color: "var(--color-base-100)", background: "var(--color-base-content)", "border-radius": "var(--radius-field)" }, "::part(range-inner)": { "border-radius": "0" }, "::part(range-start)": { "border-start-end-radius": "0", "border-end-end-radius": "0" }, "::part(range-end)": { "border-start-start-radius": "0", "border-end-start-radius": "0" }, "::part(range-start range-end)": { "border-radius": "var(--radius-field)" }, "calendar-month": { width: "100%" } }, ".react-day-picker": { "user-select": "none", "background-color": "var(--color-base-100)", "border-radius": "var(--radius-box)", border: "var(--border) solid var(--color-base-200)", "font-size": "0.75rem", display: "inline-block", position: "relative", overflow: "clip", '&[dir="rtl"]': { ".rdp-nav": { ".rdp-chevron": { "transform-origin": "50%", transform: "rotate(180deg)" } } }, "*": { "box-sizing": "border-box" }, ".rdp-day": { width: "2.25rem", height: "2.25rem", "text-align": "center" }, ".rdp-day_button": { cursor: "pointer", font: "inherit", color: "inherit", width: "2.25rem", height: "2.25rem", border: "2px solid #0000", "border-radius": "var(--radius-field)", background: "0 0", "justify-content": "center", "align-items": "center", margin: "0", padding: "0", display: "flex", "&:disabled": { cursor: "revert" }, "&:hover": { "background-color": "var(--color-base-200)" } }, ".rdp-caption_label": { "z-index": 1, "white-space": "nowrap", border: "0", "align-items": "center", display: "inline-flex", position: "relative" }, ".rdp-button_next": { "border-radius": "var(--radius-field)", "&:hover": { "background-color": "var(--color-base-200)" } }, ".rdp-button_previous": { "border-radius": "var(--radius-field)", "&:hover": 
{ "background-color": "var(--color-base-200)" } }, ".rdp-button_next, .rdp-button_previous": { cursor: "pointer", font: "inherit", color: "inherit", appearance: "none", width: "2.25rem", height: "2.25rem", background: "0 0", border: "none", "justify-content": "center", "align-items": "center", margin: "0", padding: "0", display: "inline-flex", position: "relative", "&:disabled": { cursor: "revert", opacity: 0.5 } }, ".rdp-chevron": { fill: "var(--color-base-content)", width: "1rem", height: "1rem", display: "inline-block" }, ".rdp-dropdowns": { "align-items": "center", gap: "0.5rem", display: "inline-flex", position: "relative" }, ".rdp-dropdown": { "z-index": 2, opacity: 0, appearance: "none", cursor: "inherit", "line-height": "inherit", border: "none", width: "100%", margin: "0", padding: "0", position: "absolute", "inset-block": "0", "inset-inline-start": "0", "&:focus-visible": { "~ .rdp-caption_label": { outline: ["5px auto highlight", "5px auto -webkit-focus-ring-color"] } } }, ".rdp-dropdown_root": { "align-items": "center", display: "inline-flex", position: "relative", '&[data-disabled="true"]': { ".rdp-chevron": { opacity: 0.5 } } }, ".rdp-month_caption": { height: "2.75rem", "font-size": "0.75rem", "font-weight": "inherit", "place-content": "center", display: "flex" }, ".rdp-months": { gap: "2rem", "flex-wrap": "wrap", "max-width": "fit-content", padding: "0.5rem", display: "flex", position: "relative" }, ".rdp-month_grid": { "border-collapse": "collapse" }, ".rdp-nav": { height: "2.75rem", "inset-block-start": "0", "inset-inline-end": "0", "justify-content": "space-between", "align-items": "center", width: "100%", "padding-inline": "0.5rem", display: "flex", position: "absolute", top: "0.25rem" }, ".rdp-weekday": { opacity: 0.6, padding: "0.5rem 0rem", "text-align": "center", "font-size": "smaller", "font-weight": 500 }, ".rdp-week_number": { opacity: 0.6, height: "2.25rem", width: "2.25rem", border: "none", "border-radius": "100%", "text-align": "center", "font-size": "small", "font-weight": 400 }, ".rdp-today:not(.rdp-outside)": { ".rdp-day_button": { background: "var(--color-primary)", color: "var(--color-primary-content)" } }, ".rdp-selected": { "font-weight": "inherit", "font-size": "0.75rem", ".rdp-day_button": { color: "var(--color-base-100)", "background-color": "var(--color-base-content)", "border-radius": "var(--radius-field)", border: "none", "&:hover": { "background-color": "var(--color-base-content)" } } }, ".rdp-outside": { opacity: 0.75 }, ".rdp-disabled": { opacity: 0.5 }, ".rdp-hidden": { visibility: "hidden", color: "var(--color-base-content)" }, ".rdp-range_start": { ".rdp-day_button": { "border-radius": "var(--radius-field) 0 0 var(--radius-field)" } }, ".rdp-range_start .rdp-day_button": { "background-color": "var(--color-base-content)", color: "var(--color-base-content)" }, ".rdp-range_middle": { "background-color": "var(--color-base-200)" }, ".rdp-range_middle .rdp-day_button": { border: "unset", "border-radius": "unset", color: "inherit" }, ".rdp-range_end": { color: "var(--color-base-content)", ".rdp-day_button": { "border-radius": "0 var(--radius-field) var(--radius-field) 0" } }, ".rdp-range_end .rdp-day_button": { color: "var(--color-base-content)", "background-color": "var(--color-base-content)" }, ".rdp-range_start.rdp-range_end": { background: "revert" }, ".rdp-focusable": { cursor: "pointer" }, ".rdp-footer": { "border-top": "var(--border) solid var(--color-base-200)", padding: "0.5rem" } }, ".pika-single": { "&:is(div)": { "user-select": "none", 
"font-size": "0.75rem", "z-index": 999, display: "inline-block", position: "relative", color: "var(--color-base-content)", "background-color": "var(--color-base-100)", "border-radius": "var(--radius-box)", border: "var(--border) solid var(--color-base-200)", padding: "0.5rem", "&:before, &:after": { content: '""', display: "table" }, "&:after": { clear: "both" }, "&.is-hidden": { display: "none" }, "&.is-bound": { position: "absolute" }, ".pika-lendar": { "css-float": "left" }, ".pika-title": { position: "relative", "text-align": "center", select: { cursor: "pointer", position: "absolute", "z-index": 999, margin: "0", left: "0", top: "5px", opacity: 0 } }, ".pika-label": { display: "inline-block", position: "relative", "z-index": 999, overflow: "hidden", margin: "0", padding: "5px 3px", "background-color": "var(--color-base-100)" }, ".pika-prev, .pika-next": { display: "block", cursor: "pointer", position: "absolute", top: "0", outline: "none", border: "0", width: "2.25rem", height: "2.25rem", color: "#0000", "font-size": "1.2em", "border-radius": "var(--radius-field)", "&:hover": { "background-color": "var(--color-base-200)" }, "&.is-disabled": { cursor: "default", opacity: 0.2 }, "&:before": { display: "inline-block", width: "2.25rem", height: "2.25rem", "line-height": 2.25, color: "var(--color-base-content)" } }, ".pika-prev": { left: "0", "&:before": { content: '"‹"' } }, ".pika-next": { right: "0", "&:before": { content: '"›"' } }, ".pika-select": { display: "inline-block" }, ".pika-table": { width: "100%", "border-collapse": "collapse", "border-spacing": "0", border: "0", "th, td": { padding: "0" }, th: { opacity: 0.6, "text-align": "center", width: "2.25rem", height: "2.25rem" } }, ".pika-button": { cursor: "pointer", display: "block", outline: "none", border: "0", margin: "0", width: "2.25rem", height: "2.25rem", padding: "5px", "text-align": ["right", "center"] }, ".pika-week": { color: "var(--color-base-content)" }, ".is-today": { ".pika-button": { background: "var(--color-primary)", color: "var(--color-primary-content)" } }, ".is-selected, .has-event": { ".pika-button": { "&, &:hover": { color: "var(--color-base-100)", "background-color": "var(--color-base-content)", "border-radius": "var(--radius-field)" } } }, ".has-event": { ".pika-button": { background: "var(--color-base-primary)" } }, ".is-disabled, .is-inrange": { ".pika-button": { background: "var(--color-base-primary)" } }, ".is-startrange": { ".pika-button": { color: "var(--color-base-100)", background: "var(--color-base-content)", "border-radius": "var(--radius-field)" } }, ".is-endrange": { ".pika-button": { color: "var(--color-base-100)", background: "var(--color-base-content)", "border-radius": "var(--radius-field)" } }, ".is-disabled": { ".pika-button": { "pointer-events": "none", cursor: "default", color: "var(--color-base-content)", opacity: 0.3 } }, ".is-outside-current-month": { ".pika-button": { color: "var(--color-base-content)", opacity: 0.3 } }, ".is-selection-disabled": { "pointer-events": "none", cursor: "default" }, ".pika-button:hover, .pika-row.pick-whole-week:hover .pika-button": { color: "var(--color-base-content)", "background-color": "var(--color-base-200)", "border-radius": "var(--radius-field)" }, ".pika-table abbr": { "text-decoration": "none", "font-weight": "normal" } } } }; + +// packages/daisyui/components/calendar/index.js +var calendar_default = ({ addComponents, prefix = "" }) => { + const prefixedcalendar = addPrefix(object_default42, prefix); + addComponents({ ...prefixedcalendar }); 
+}; + +// packages/daisyui/components/indicator/object.js +var object_default43 = { ".indicator": { position: "relative", display: "inline-flex", width: "max-content", ":where(.indicator-item)": { "z-index": 1, position: "absolute", "white-space": "nowrap", top: "var(--inidicator-t, 0)", bottom: "var(--inidicator-b, auto)", left: "var(--inidicator-s, auto)", right: "var(--inidicator-e, 0)", translate: "var(--inidicator-x, 50%) var(--indicator-y, -50%)" } }, ".indicator-start": { "--inidicator-s": "0", "--inidicator-e": "auto", "--inidicator-x": "-50%" }, ".indicator-center": { "--inidicator-s": "50%", "--inidicator-e": "50%", "--inidicator-x": "-50%", '[dir="rtl"] &': { "--inidicator-x": "50%" } }, ".indicator-end": { "--inidicator-s": "auto", "--inidicator-e": "0", "--inidicator-x": "50%" }, ".indicator-bottom": { "--inidicator-t": "auto", "--inidicator-b": "0", "--indicator-y": "50%" }, ".indicator-middle": { "--inidicator-t": "50%", "--inidicator-b": "50%", "--indicator-y": "-50%" }, ".indicator-top": { "--inidicator-t": "0", "--inidicator-b": "auto", "--indicator-y": "-50%" } }; + +// packages/daisyui/components/indicator/index.js +var indicator_default = ({ addComponents, prefix = "" }) => { + const prefixedindicator = addPrefix(object_default43, prefix); + addComponents({ ...prefixedindicator }); +}; + +// packages/daisyui/components/rating/object.js +var object_default44 = { ".rating": { position: "relative", display: "inline-flex", "vertical-align": "middle", "& input": { border: "none", appearance: "none" }, ":where(*)": { animation: "rating 0.25s ease-out", height: "calc(0.25rem * 6)", width: "calc(0.25rem * 6)", "border-radius": "0", "background-color": "var(--color-base-content)", opacity: "20%", "&:is(input)": { cursor: "pointer" } }, "& .rating-hidden": { width: "calc(0.25rem * 2)", "background-color": "transparent" }, 'input[type="radio"]:checked': { "background-image": "none" }, "*": { '&:checked, &[aria-checked="true"], &[aria-current="true"], &:has(~ *:checked, ~ *[aria-checked="true"], ~ *[aria-current="true"])': { opacity: "100%" }, "&:focus-visible": { transition: "scale 0.2s ease-out", scale: "1.1" } }, "& *:active:focus": { animation: "none", scale: "1.1" }, "&.rating-xs :where(*:not(.rating-hidden))": { width: "calc(0.25rem * 4)", height: "calc(0.25rem * 4)" }, "&.rating-sm :where(*:not(.rating-hidden))": { width: "calc(0.25rem * 5)", height: "calc(0.25rem * 5)" }, "&.rating-md :where(*:not(.rating-hidden))": { width: "calc(0.25rem * 6)", height: "calc(0.25rem * 6)" }, "&.rating-lg :where(*:not(.rating-hidden))": { width: "calc(0.25rem * 7)", height: "calc(0.25rem * 7)" }, "&.rating-xl :where(*:not(.rating-hidden))": { width: "calc(0.25rem * 8)", height: "calc(0.25rem * 8)" } }, ".rating-half": { ":where(*:not(.rating-hidden))": { width: "calc(0.25rem * 3)" }, "&.rating-xs *:not(.rating-hidden)": { width: "calc(0.25rem * 2)" }, "&.rating-sm *:not(.rating-hidden)": { width: "calc(0.25rem * 2.5)" }, "&.rating-md *:not(.rating-hidden)": { width: "calc(0.25rem * 3)" }, "&.rating-lg *:not(.rating-hidden)": { width: ".875rem" }, "&.rating-xl *:not(.rating-hidden)": { width: "calc(0.25rem * 4)" } }, "@keyframes rating": { "0%, 40%": { scale: "1.1", filter: "brightness(1.05) contrast(1.05)" } } }; + +// packages/daisyui/components/rating/index.js +var rating_default = ({ addComponents, prefix = "" }) => { + const prefixedrating = addPrefix(object_default44, prefix); + addComponents({ ...prefixedrating }); +}; + +// packages/daisyui/components/tab/object.js +var 
object_default45 = { ".tabs": { display: "flex", "flex-wrap": "wrap", "--tabs-height": "auto", "--tabs-direction": "row", height: "var(--tabs-height)", "flex-direction": "var(--tabs-direction)" }, ".tab": { position: "relative", display: "inline-flex", cursor: "pointer", appearance: "none", "flex-wrap": "wrap", "align-items": "center", "justify-content": "center", "text-align": "center", "webkit-user-select": "none", "user-select": "none", "&:hover": { "@media (hover: hover)": { color: "var(--color-base-content)" } }, "--tab-p": "1rem", "--tab-bg": "var(--color-base-100)", "--tab-border-color": "var(--color-base-300)", "--tab-radius-ss": "0", "--tab-radius-se": "0", "--tab-radius-es": "0", "--tab-radius-ee": "0", "--tab-order": "0", "--tab-radius-min": "calc(0.75rem - var(--border))", "border-color": "#0000", order: "var(--tab-order)", height: "calc(var(--size-field, 0.25rem) * 10)", "font-size": "0.875rem", "padding-inline-start": "var(--tab-p)", "padding-inline-end": "var(--tab-p)", '&:is(input[type="radio"])': { "min-width": "fit-content", "&:after": { content: "attr(aria-label)" } }, "&:is(label)": { position: "relative", input: { position: "absolute", inset: "calc(0.25rem * 0)", cursor: "pointer", appearance: "none", opacity: "0%" } }, '&:checked, &:is(label:has(:checked)), &:is(.tab-active, [aria-selected="true"])': { "& + .tab-content": { display: "block", height: "100%" } }, '&:not(:checked, label:has(:checked), :hover, .tab-active, [aria-selected="true"])': { color: "color-mix(in oklab, var(--color-base-content) 50%, transparent)" }, "&:not(input):empty": { "flex-grow": 1, cursor: "default" }, "&:focus": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } }, "&:focus-visible, &:is(label:has(:checked:focus-visible))": { outline: "2px solid currentColor", "outline-offset": "-5px" }, "&[disabled]": { "pointer-events": "none", opacity: "40%" } }, ".tab-disabled": { "pointer-events": "none", opacity: "40%" }, ".tabs-border": { ".tab": { "--tab-border-color": "#0000 #0000 var(--tab-border-color) #0000", position: "relative", "border-radius": "var(--radius-field)", "&:before": { "--tw-content": '""', content: "var(--tw-content)", "background-color": "var(--tab-border-color)", transition: "background-color 0.2s ease", width: "80%", height: "3px", "border-radius": "var(--radius-field)", bottom: "0", left: "10%", position: "absolute" }, '&:is(.tab-active, [aria-selected="true"]):not(.tab-disabled, [disabled]), &:is(input:checked), &:is(label:has(:checked))': { "&:before": { "--tab-border-color": "currentColor", "border-top": "3px solid" } } } }, ".tabs-lift": { "--tabs-height": "auto", "--tabs-direction": "row", "> .tab": { "--tab-border": "0 0 var(--border) 0", "--tab-radius-ss": "min(var(--radius-field), var(--tab-radius-min))", "--tab-radius-se": "min(var(--radius-field), var(--tab-radius-min))", "--tab-radius-es": "0", "--tab-radius-ee": "0", "--tab-paddings": "var(--border) var(--tab-p) 0 var(--tab-p)", "--tab-border-colors": "#0000 #0000 var(--tab-border-color) #0000", "--tab-corner-width": "calc(100% + min(var(--radius-field), var(--tab-radius-min)) * 2)", "--tab-corner-height": "min(var(--radius-field), var(--tab-radius-min))", "--tab-corner-position": "top left, top right", "border-width": "var(--tab-border)", "border-start-start-radius": "var(--tab-radius-ss)", "border-start-end-radius": "var(--tab-radius-se)", "border-end-start-radius": "var(--tab-radius-es)", 
"border-end-end-radius": "var(--tab-radius-ee)", padding: "var(--tab-paddings)", "border-color": "var(--tab-border-colors)", '&:is(.tab-active, [aria-selected="true"]):not(.tab-disabled, [disabled]), &:is(input:checked, label:has(:checked))': { "--tab-border": "var(--border) var(--border) 0 var(--border)", "--tab-border-colors": `var(--tab-border-color) var(--tab-border-color) #0000 + var(--tab-border-color)`, "--tab-paddings": `0 calc(var(--tab-p) - var(--border)) var(--border) + calc(var(--tab-p) - var(--border))`, "--tab-inset": "auto auto 0 auto", "--tab-grad": "calc(69% - var(--border))", "--radius-start": `radial-gradient( + circle at top left, + #0000 var(--tab-grad), + var(--tab-border-color) calc(var(--tab-grad) + 0.25px), + var(--tab-border-color) calc(var(--tab-grad) + var(--border)), + var(--tab-bg) calc(var(--tab-grad) + var(--border) + 0.25px) + )`, "--radius-end": `radial-gradient( + circle at top right, + #0000 var(--tab-grad), + var(--tab-border-color) calc(var(--tab-grad) + 0.25px), + var(--tab-border-color) calc(var(--tab-grad) + var(--border)), + var(--tab-bg) calc(var(--tab-grad) + var(--border) + 0.25px) + )`, "background-color": "var(--tab-bg)", "&:before": { "z-index": 1, content: '""', display: "block", position: "absolute", width: "var(--tab-corner-width)", height: "var(--tab-corner-height)", "background-position": "var(--tab-corner-position)", "background-image": "var(--radius-start), var(--radius-end)", "background-size": "min(var(--radius-field), var(--tab-radius-min)) min(var(--radius-field), var(--tab-radius-min))", "background-repeat": "no-repeat", inset: "var(--tab-inset)" }, "&:first-child:before": { "--radius-start": "none" }, '[dir="rtl"] &:first-child:before': { transform: "rotateY(180deg)" }, "&:last-child:before": { "--radius-end": "none" }, '[dir="rtl"] &:last-child:before': { transform: "rotateY(180deg)" } } }, "&:has(.tab-content)": { "> .tab:first-child": { '&:not(.tab-active, [aria-selected="true"])': { "--tab-border-colors": `var(--tab-border-color) var(--tab-border-color) #0000 + var(--tab-border-color)` } } }, ".tab-content": { "--tabcontent-margin": "calc(-1 * var(--border)) 0 0 0", "--tabcontent-radius-ss": "0", "--tabcontent-radius-se": "var(--radius-box)", "--tabcontent-radius-es": "var(--radius-box)", "--tabcontent-radius-ee": "var(--radius-box)" }, ':checked, label:has(:checked), :is(.tab-active, [aria-selected="true"])': { "& + .tab-content": { "&:nth-child(1), &:nth-child(n + 3)": { "--tabcontent-radius-ss": "var(--radius-box)" } } } }, ".tabs-top": { "--tabs-height": "auto", "--tabs-direction": "row", ".tab": { "--tab-order": "0", "--tab-border": "0 0 var(--border) 0", "--tab-radius-ss": "min(var(--radius-field), var(--tab-radius-min))", "--tab-radius-se": "min(var(--radius-field), var(--tab-radius-min))", "--tab-radius-es": "0", "--tab-radius-ee": "0", "--tab-paddings": "var(--border) var(--tab-p) 0 var(--tab-p)", "--tab-border-colors": "#0000 #0000 var(--tab-border-color) #0000", "--tab-corner-width": "calc(100% + min(var(--radius-field), var(--tab-radius-min)) * 2)", "--tab-corner-height": "min(var(--radius-field), var(--tab-radius-min))", "--tab-corner-position": "top left, top right", '&:is(.tab-active, [aria-selected="true"]):not(.tab-disabled, [disabled]), &:is(input:checked), &:is(label:has(:checked))': { "--tab-border": "var(--border) var(--border) 0 var(--border)", "--tab-border-colors": `var(--tab-border-color) var(--tab-border-color) #0000 + var(--tab-border-color)`, "--tab-paddings": `0 calc(var(--tab-p) - var(--border)) 
var(--border) + calc(var(--tab-p) - var(--border))`, "--tab-inset": "auto auto 0 auto", "--radius-start": `radial-gradient( + circle at top left, + #0000 var(--tab-grad), + var(--tab-border-color) calc(var(--tab-grad) + 0.25px), + var(--tab-border-color) calc(var(--tab-grad) + var(--border)), + var(--tab-bg) calc(var(--tab-grad) + var(--border) + 0.25px) + )`, "--radius-end": `radial-gradient( + circle at top right, + #0000 var(--tab-grad), + var(--tab-border-color) calc(var(--tab-grad) + 0.25px), + var(--tab-border-color) calc(var(--tab-grad) + var(--border)), + var(--tab-bg) calc(var(--tab-grad) + var(--border) + 0.25px) + )` } }, "&:has(.tab-content)": { "> .tab:first-child": { '&:not(.tab-active, [aria-selected="true"])': { "--tab-border-colors": `var(--tab-border-color) var(--tab-border-color) #0000 + var(--tab-border-color)` } } }, ".tab-content": { "--tabcontent-order": "1", "--tabcontent-margin": "calc(-1 * var(--border)) 0 0 0", "--tabcontent-radius-ss": "0", "--tabcontent-radius-se": "var(--radius-box)", "--tabcontent-radius-es": "var(--radius-box)", "--tabcontent-radius-ee": "var(--radius-box)" }, ':checked, label:has(:checked), :is(.tab-active, [aria-selected="true"])': { "& + .tab-content": { "&:nth-child(1), &:nth-child(n + 3)": { "--tabcontent-radius-ss": "var(--radius-box)" } } } }, ".tabs-bottom": { "--tabs-height": "auto", "--tabs-direction": "row", ".tab": { "--tab-order": "1", "--tab-border": "var(--border) 0 0 0", "--tab-radius-ss": "0", "--tab-radius-se": "0", "--tab-radius-es": "min(var(--radius-field), var(--tab-radius-min))", "--tab-radius-ee": "min(var(--radius-field), var(--tab-radius-min))", "--tab-border-colors": "var(--tab-border-color) #0000 #0000 #0000", "--tab-paddings": "0 var(--tab-p) var(--border) var(--tab-p)", "--tab-corner-width": "calc(100% + min(var(--radius-field), var(--tab-radius-min)) * 2)", "--tab-corner-height": "min(var(--radius-field), var(--tab-radius-min))", "--tab-corner-position": "top left, top right", '&:is(.tab-active, [aria-selected="true"]):not(.tab-disabled, [disabled]), &:is(input:checked), &:is(label:has(:checked))': { "--tab-border": "0 var(--border) var(--border) var(--border)", "--tab-border-colors": `#0000 var(--tab-border-color) var(--tab-border-color) + var(--tab-border-color)`, "--tab-paddings": `var(--border) calc(var(--tab-p) - var(--border)) 0 + calc(var(--tab-p) - var(--border))`, "--tab-inset": "0 auto auto auto", "--radius-start": `radial-gradient( + circle at bottom left, + #0000 var(--tab-grad), + var(--tab-border-color) calc(var(--tab-grad) + 0.25px), + var(--tab-border-color) calc(var(--tab-grad) + var(--border)), + var(--tab-bg) calc(var(--tab-grad) + var(--border) + 0.25px) + )`, "--radius-end": `radial-gradient( + circle at bottom right, + #0000 var(--tab-grad), + var(--tab-border-color) calc(var(--tab-grad) + 0.25px), + var(--tab-border-color) calc(var(--tab-grad) + var(--border)), + var(--tab-bg) calc(var(--tab-grad) + var(--border) + 0.25px) + )` } }, "&:has(.tab-content)": { "> .tab:first-child": { '&:not(.tab-active, [aria-selected="true"])': { "--tab-border-colors": `#0000 var(--tab-border-color) var(--tab-border-color) + var(--tab-border-color)` } } }, ".tab-content": { "--tabcontent-order": "0", "--tabcontent-margin": "0 0 calc(-1 * var(--border)) 0", "--tabcontent-radius-ss": "var(--radius-box)", "--tabcontent-radius-se": "var(--radius-box)", "--tabcontent-radius-es": "0", "--tabcontent-radius-ee": "var(--radius-box)" }, '> :checked, > :is(label:has(:checked)), > :is(.tab-active, 
[aria-selected="true"])': { "& + .tab-content:not(:nth-child(2))": { "--tabcontent-radius-es": "var(--radius-box)" } } }, ".tabs-box": { "background-color": "var(--color-base-200)", padding: "calc(0.25rem * 1)", "--tabs-box-radius": "calc(var(--radius-field) + var(--radius-field) + var(--radius-field))", "border-radius": "calc(var(--radius-field) + min(0.25rem, var(--tabs-box-radius)))", "box-shadow": "0 -0.5px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 0.5px oklch(0% 0 0 / calc(var(--depth) * 0.05)) inset", ".tab": { "border-radius": "var(--radius-field)", "border-style": "none", "&:focus-visible, &:is(label:has(:checked:focus-visible))": { "outline-offset": "2px" } }, '> :is(.tab-active, [aria-selected="true"]):not(.tab-disabled, [disabled]), > :is(input:checked), > :is(label:has(:checked))': { "background-color": "var(--tab-bg, var(--color-base-100))", "box-shadow": "0 1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px 1px -1px color-mix(in oklab, var(--color-neutral) calc(var(--depth) * 50%), #0000), 0 1px 6px -4px color-mix(in oklab, var(--color-neutral) calc(var(--depth) * 100%), #0000)", "@media (forced-colors: active)": { border: "1px solid" } } }, ".tab-content": { order: [1, "var(--tabcontent-order)"], display: "none", "border-color": "transparent", "--tabcontent-radius-ss": "0", "--tabcontent-radius-se": "0", "--tabcontent-radius-es": "0", "--tabcontent-radius-ee": "0", "--tabcontent-order": "1", width: "100%", margin: "var(--tabcontent-margin)", "border-width": "var(--border)", "border-start-start-radius": "var(--tabcontent-radius-ss)", "border-start-end-radius": "var(--tabcontent-radius-se)", "border-end-start-radius": "var(--tabcontent-radius-es)", "border-end-end-radius": "var(--tabcontent-radius-ee)" }, ".tabs-xs": { ":where(.tab)": { height: "calc(var(--size-field, 0.25rem) * 6)", "font-size": "0.75rem", "--tab-p": "0.375rem", "--tab-radius-min": "calc(0.5rem - var(--border))" } }, ".tabs-sm": { ":where(.tab)": { height: "calc(var(--size-field, 0.25rem) * 8)", "font-size": "0.875rem", "--tab-p": "0.5rem", "--tab-radius-min": "calc(0.5rem - var(--border))" } }, ".tabs-md": { ":where(.tab)": { height: "calc(var(--size-field, 0.25rem) * 10)", "font-size": "0.875rem", "--tab-p": "0.75rem", "--tab-radius-min": "calc(0.75rem - var(--border))" } }, ".tabs-lg": { ":where(.tab)": { height: "calc(var(--size-field, 0.25rem) * 12)", "font-size": "1.125rem", "--tab-p": "1rem", "--tab-radius-min": "calc(1.5rem - var(--border))" } }, ".tabs-xl": { ":where(.tab)": { height: "calc(var(--size-field, 0.25rem) * 14)", "font-size": "1.125rem", "--tab-p": "1.25rem", "--tab-radius-min": "calc(2rem - var(--border))" } } }; + +// packages/daisyui/components/tab/index.js +var tab_default = ({ addComponents, prefix = "" }) => { + const prefixedtab = addPrefix(object_default45, prefix); + addComponents({ ...prefixedtab }); +}; + +// packages/daisyui/components/filter/object.js +var object_default46 = { ".filter": { display: "flex", "flex-wrap": "wrap", 'input[type="radio"]': { width: "auto" }, input: { overflow: "hidden", opacity: "100%", scale: "1", transition: "margin 0.1s, opacity 0.3s, padding 0.3s, border-width 0.1s", "&:not(:last-child)": { "margin-inline-end": "calc(0.25rem * 1)" }, "&.filter-reset": { "aspect-ratio": "1 / 1", "&::after": { content: '"×"' } } }, "&:not(:has(input:checked:not(.filter-reset)))": { '.filter-reset, input[type="reset"]': { scale: "0", "border-width": "0", "margin-inline": "calc(0.25rem * 0)", width: "calc(0.25rem * 0)", "padding-inline": 
"calc(0.25rem * 0)", opacity: "0%" } }, "&:has(input:checked:not(.filter-reset))": { 'input:not(:checked, .filter-reset, input[type="reset"])': { scale: "0", "border-width": "0", "margin-inline": "calc(0.25rem * 0)", width: "calc(0.25rem * 0)", "padding-inline": "calc(0.25rem * 0)", opacity: "0%" } } } }; + +// packages/daisyui/components/filter/index.js +var filter_default = ({ addComponents, prefix = "" }) => { + const prefixedfilter = addPrefix(object_default46, prefix); + addComponents({ ...prefixedfilter }); +}; + +// packages/daisyui/components/chat/object.js +var object_default47 = { ".chat": { display: "grid", "column-gap": "calc(0.25rem * 3)", "padding-block": "calc(0.25rem * 1)" }, ".chat-bubble": { position: "relative", display: "block", width: "fit-content", "border-radius": "var(--radius-field)", "background-color": "var(--color-base-300)", "padding-inline": "calc(0.25rem * 4)", "padding-block": "calc(0.25rem * 2)", color: "var(--color-base-content)", "grid-row-end": "3", "min-height": "2rem", "min-width": "2.5rem", "max-width": "90%", "&:before": { position: "absolute", bottom: "calc(0.25rem * 0)", height: "calc(0.25rem * 3)", width: "calc(0.25rem * 3)", "background-color": "inherit", content: '""', "mask-repeat": "no-repeat", "mask-image": "var(--mask-chat)", "mask-position": "0px -1px", "mask-size": "13px" } }, ".chat-bubble-primary": { "background-color": "var(--color-primary)", color: "var(--color-primary-content)" }, ".chat-bubble-secondary": { "background-color": "var(--color-secondary)", color: "var(--color-secondary-content)" }, ".chat-bubble-accent": { "background-color": "var(--color-accent)", color: "var(--color-accent-content)" }, ".chat-bubble-neutral": { "background-color": "var(--color-neutral)", color: "var(--color-neutral-content)" }, ".chat-bubble-info": { "background-color": "var(--color-info)", color: "var(--color-info-content)" }, ".chat-bubble-success": { "background-color": "var(--color-success)", color: "var(--color-success-content)" }, ".chat-bubble-warning": { "background-color": "var(--color-warning)", color: "var(--color-warning-content)" }, ".chat-bubble-error": { "background-color": "var(--color-error)", color: "var(--color-error-content)" }, ".chat-image": { "grid-row": "span 2 / span 2", "align-self": "flex-end" }, ".chat-header": { "grid-row-start": "1", display: "flex", gap: "calc(0.25rem * 1)", "font-size": "0.6875rem" }, ".chat-footer": { "grid-row-start": "3", display: "flex", gap: "calc(0.25rem * 1)", "font-size": "0.6875rem" }, ".chat-start": { "place-items": "start", "grid-template-columns": "auto 1fr", ".chat-header": { "grid-column-start": "2" }, ".chat-footer": { "grid-column-start": "2" }, ".chat-image": { "grid-column-start": "1" }, ".chat-bubble": { "grid-column-start": "2", "border-end-start-radius": "0", "&:before": { transform: "rotateY(0deg)", "inset-inline-start": "-0.75rem" }, '[dir="rtl"] &:before': { transform: "rotateY(180deg)" } } }, ".chat-end": { "place-items": "end", "grid-template-columns": "1fr auto", ".chat-header": { "grid-column-start": "1" }, ".chat-footer": { "grid-column-start": "1" }, ".chat-image": { "grid-column-start": "2" }, ".chat-bubble": { "grid-column-start": "1", "border-end-end-radius": "0", "&:before": { transform: "rotateY(180deg)", "inset-inline-start": "100%" }, '[dir="rtl"] &:before': { transform: "rotateY(0deg)" } } } }; + +// packages/daisyui/components/chat/index.js +var chat_default = ({ addComponents, prefix = "" }) => { + const prefixedchat = addPrefix(object_default47, prefix); + 
addComponents({ ...prefixedchat }); +}; + +// packages/daisyui/components/radialprogress/object.js +var object_default48 = { ".radial-progress": { position: "relative", display: "inline-grid", height: "var(--size)", width: "var(--size)", "place-content": "center", "border-radius": "calc(infinity * 1px)", "background-color": "transparent", "vertical-align": "middle", "box-sizing": "content-box", "--value": "0", "--size": "5rem", "--thickness": "calc(var(--size) / 10)", "--radialprogress": "calc(var(--value) * 1%)", transition: "--radialprogress 0.3s linear", "&:before": { position: "absolute", inset: "calc(0.25rem * 0)", "border-radius": "calc(infinity * 1px)", content: '""', background: "radial-gradient(farthest-side, currentColor 98%, #0000) top/var(--thickness) var(--thickness) no-repeat, conic-gradient(currentColor var(--radialprogress), #0000 0)", "webkit-mask": "radial-gradient( farthest-side, #0000 calc(100% - var(--thickness)), #000 calc(100% + 0.5px - var(--thickness)) )", mask: "radial-gradient( farthest-side, #0000 calc(100% - var(--thickness)), #000 calc(100% + 0.5px - var(--thickness)) )" }, "&:after": { position: "absolute", "border-radius": "calc(infinity * 1px)", "background-color": "currentColor", transition: "transform 0.3s linear", content: '""', inset: "calc(50% - var(--thickness) / 2)", transform: "rotate(calc(var(--value) * 3.6deg - 90deg)) translate(calc(var(--size) / 2 - 50%))" } } }; + +// packages/daisyui/components/radialprogress/index.js +var radialprogress_default = ({ addComponents, prefix = "" }) => { + const prefixedradialprogress = addPrefix(object_default48, prefix); + addComponents({ ...prefixedradialprogress }); +}; + +// packages/daisyui/components/countdown/object.js +var object_default49 = { ".countdown": { display: "inline-flex", "&.countdown": { "line-height": "1em" }, "& > *": { display: "inline-block", "overflow-y": "hidden", height: "1em", "&:before": { position: "relative", content: '"00\\A 01\\A 02\\A 03\\A 04\\A 05\\A 06\\A 07\\A 08\\A 09\\A 10\\A 11\\A 12\\A 13\\A 14\\A 15\\A 16\\A 17\\A 18\\A 19\\A 20\\A 21\\A 22\\A 23\\A 24\\A 25\\A 26\\A 27\\A 28\\A 29\\A 30\\A 31\\A 32\\A 33\\A 34\\A 35\\A 36\\A 37\\A 38\\A 39\\A 40\\A 41\\A 42\\A 43\\A 44\\A 45\\A 46\\A 47\\A 48\\A 49\\A 50\\A 51\\A 52\\A 53\\A 54\\A 55\\A 56\\A 57\\A 58\\A 59\\A 60\\A 61\\A 62\\A 63\\A 64\\A 65\\A 66\\A 67\\A 68\\A 69\\A 70\\A 71\\A 72\\A 73\\A 74\\A 75\\A 76\\A 77\\A 78\\A 79\\A 80\\A 81\\A 82\\A 83\\A 84\\A 85\\A 86\\A 87\\A 88\\A 89\\A 90\\A 91\\A 92\\A 93\\A 94\\A 95\\A 96\\A 97\\A 98\\A 99\\A"', "white-space": "pre", top: "calc(var(--value) * -1em)", "text-align": "center", transition: "all 1s cubic-bezier(1, 0, 0, 1)" } } } }; + +// packages/daisyui/components/countdown/index.js +var countdown_default = ({ addComponents, prefix = "" }) => { + const prefixedcountdown = addPrefix(object_default49, prefix); + addComponents({ ...prefixedcountdown }); +}; + +// packages/daisyui/components/tooltip/object.js +var object_default50 = { ".tooltip": { position: "relative", display: "inline-block", "--tt-bg": "var(--color-neutral)", "--tt-off": "calc(100% + 0.5rem)", "--tt-tail": "calc(100% + 1px + 0.25rem)", "> :where(.tooltip-content), &:where([data-tip]):before": { position: "absolute", "max-width": "20rem", "border-radius": "var(--radius-field)", "padding-inline": "calc(0.25rem * 2)", "padding-block": "calc(0.25rem * 1)", "text-align": "center", "white-space": "normal", color: "var(--color-neutral-content)", opacity: "0%", "font-size": "0.875rem", "line-height": 1.25, 
transition: "opacity 0.2s cubic-bezier(0.4, 0, 0.2, 1) 75ms, transform 0.2s cubic-bezier(0.4, 0, 0.2, 1) 75ms", "background-color": "var(--tt-bg)", width: "max-content", "pointer-events": "none", "z-index": 1, "--tw-content": "attr(data-tip)", content: "var(--tw-content)" }, "&:after": { position: ["absolute", "absolute"], opacity: "0%", "background-color": "var(--tt-bg)", transition: "opacity 0.2s cubic-bezier(0.4, 0, 0.2, 1) 75ms, transform 0.2s cubic-bezier(0.4, 0, 0.2, 1) 75ms", content: '""', "pointer-events": "none", width: "0.625rem", height: "0.25rem", display: "block", "mask-repeat": "no-repeat", "mask-position": "-1px 0", "--mask-tooltip": `url("data:image/svg+xml,%3Csvg width='10' height='4' viewBox='0 0 8 4' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M0.500009 1C3.5 1 3.00001 4 5.00001 4C7 4 6.5 1 9.5 1C10 1 10 0.499897 10 0H0C-1.99338e-08 0.5 0 1 0.500009 1Z' fill='black'/%3E%3C/svg%3E%0A")`, "mask-image": "var(--mask-tooltip)" }, '&.tooltip-open, &[data-tip]:not([data-tip=""]):hover, &:not(:has(.tooltip-content:empty)):has(.tooltip-content):hover, &:has(:focus-visible)': { "> .tooltip-content, &[data-tip]:before, &:after": { opacity: "100%", "--tt-pos": "0rem", transition: "opacity 0.2s cubic-bezier(0.4, 0, 0.2, 1) 0s, transform 0.2s cubic-bezier(0.4, 0, 0.2, 1) 0ms" } } }, ".tooltip, .tooltip-top": { "> .tooltip-content, &[data-tip]:before": { transform: "translateX(-50%) translateY(var(--tt-pos, 0.25rem))", inset: "auto auto var(--tt-off) 50%" }, "&:after": { transform: "translateX(-50%) translateY(var(--tt-pos, 0.25rem))", inset: "auto auto var(--tt-tail) 50%" } }, ".tooltip-bottom": { "> .tooltip-content, &[data-tip]:before": { transform: "translateX(-50%) translateY(var(--tt-pos, -0.25rem))", inset: "var(--tt-off) auto auto 50%" }, "&:after": { transform: "translateX(-50%) translateY(var(--tt-pos, -0.25rem)) rotate(180deg)", inset: "var(--tt-tail) auto auto 50%" } }, ".tooltip-left": { "> .tooltip-content, &[data-tip]:before": { transform: "translateX(calc(var(--tt-pos, 0.25rem) - 0.25rem)) translateY(-50%)", inset: "50% var(--tt-off) auto auto" }, "&:after": { transform: "translateX(var(--tt-pos, 0.25rem)) translateY(-50%) rotate(-90deg)", inset: "50% calc(var(--tt-tail) + 1px) auto auto" } }, ".tooltip-right": { "> .tooltip-content, &[data-tip]:before": { transform: "translateX(calc(var(--tt-pos, -0.25rem) + 0.25rem)) translateY(-50%)", inset: "50% auto auto var(--tt-off)" }, "&:after": { transform: "translateX(var(--tt-pos, -0.25rem)) translateY(-50%) rotate(90deg)", inset: "50% auto auto calc(var(--tt-tail) + 1px)" } }, ".tooltip-primary": { "--tt-bg": "var(--color-primary)", "> .tooltip-content, &[data-tip]:before": { color: "var(--color-primary-content)" } }, ".tooltip-secondary": { "--tt-bg": "var(--color-secondary)", "> .tooltip-content, &[data-tip]:before": { color: "var(--color-secondary-content)" } }, ".tooltip-accent": { "--tt-bg": "var(--color-accent)", "> .tooltip-content, &[data-tip]:before": { color: "var(--color-accent-content)" } }, ".tooltip-info": { "--tt-bg": "var(--color-info)", "> .tooltip-content, &[data-tip]:before": { color: "var(--color-info-content)" } }, ".tooltip-success": { "--tt-bg": "var(--color-success)", "> .tooltip-content, &[data-tip]:before": { color: "var(--color-success-content)" } }, ".tooltip-warning": { "--tt-bg": "var(--color-warning)", "> .tooltip-content, &[data-tip]:before": { color: "var(--color-warning-content)" } }, ".tooltip-error": { "--tt-bg": "var(--color-error)", "> .tooltip-content, 
&[data-tip]:before": { color: "var(--color-error-content)" } } }; + +// packages/daisyui/components/tooltip/index.js +var tooltip_default = ({ addComponents, prefix = "" }) => { + const prefixedtooltip = addPrefix(object_default50, prefix); + addComponents({ ...prefixedtooltip }); +}; + +// packages/daisyui/components/timeline/object.js +var object_default51 = { ".timeline": { position: "relative", display: "flex", "> li": { position: "relative", display: "grid", "flex-shrink": 0, "align-items": "center", "grid-template-rows": "var(--timeline-row-start, minmax(0, 1fr)) auto var( --timeline-row-end, minmax(0, 1fr) )", "grid-template-columns": "var(--timeline-col-start, minmax(0, 1fr)) auto var( --timeline-col-end, minmax(0, 1fr) )", "> hr": { border: "none", width: "100%", "&:first-child": { "grid-column-start": "1", "grid-row-start": "2" }, "&:last-child": { "grid-column-start": "3", "grid-column-end": "none", "grid-row-start": "2", "grid-row-end": "auto" }, "@media print": { border: "0.1px solid var(--color-base-300)" } } }, ":where(hr)": { height: "calc(0.25rem * 1)", "background-color": "var(--color-base-300)" }, "&:has(.timeline-middle hr)": { "&:first-child": { "border-start-start-radius": "0", "border-end-start-radius": "0", "border-start-end-radius": "var(--radius-selector)", "border-end-end-radius": "var(--radius-selector)" }, "&:last-child": { "border-start-start-radius": "var(--radius-selector)", "border-end-start-radius": "var(--radius-selector)", "border-start-end-radius": "0", "border-end-end-radius": "0" } }, "&:not(:has(.timeline-middle))": { ":first-child hr:last-child": { "border-start-start-radius": "var(--radius-selector)", "border-end-start-radius": "var(--radius-selector)", "border-start-end-radius": "0", "border-end-end-radius": "0" }, ":last-child hr:first-child": { "border-start-start-radius": "0", "border-end-start-radius": "0", "border-start-end-radius": "var(--radius-selector)", "border-end-end-radius": "var(--radius-selector)" } } }, ".timeline-box": { border: "var(--border) solid", "border-radius": "var(--radius-box)", "border-color": "var(--color-base-300)", "background-color": "var(--color-base-100)", "padding-inline": "calc(0.25rem * 4)", "padding-block": "calc(0.25rem * 2)", "font-size": "0.75rem", "box-shadow": "0 1px 2px 0 oklch(0% 0 0/0.05)" }, ".timeline-start": { "grid-column-start": "1", "grid-column-end": "4", "grid-row-start": "1", "grid-row-end": "2", margin: "calc(0.25rem * 1)", "align-self": "flex-end", "justify-self": "center" }, ".timeline-middle": { "grid-column-start": "2", "grid-row-start": "2" }, ".timeline-end": { "grid-column-start": "1", "grid-column-end": "4", "grid-row-start": "3", "grid-row-end": "4", margin: "calc(0.25rem * 1)", "align-self": "flex-start", "justify-self": "center" }, ".timeline-compact": { "--timeline-row-start": "0", ".timeline-start": { "grid-column-start": "1", "grid-column-end": "4", "grid-row-start": "3", "grid-row-end": "4", "align-self": "flex-start", "justify-self": "center" }, "li:has(.timeline-start)": { ".timeline-end": { "grid-column-start": "none", "grid-row-start": "auto" } }, "&.timeline-vertical": { "> li": { "--timeline-col-start": "0" }, ".timeline-start": { "grid-column-start": "3", "grid-column-end": "4", "grid-row-start": "1", "grid-row-end": "4", "align-self": "center", "justify-self": "flex-start" }, "li:has(.timeline-start)": { ".timeline-end": { "grid-column-start": "auto", "grid-row-start": "none" } } } }, ".timeline-snap-icon": { "> li": { "--timeline-col-start": "0.5rem", 
"--timeline-row-start": "minmax(0, 1fr)" } }, ".timeline-vertical": { "flex-direction": "column", "> li": { "justify-items": "center", "--timeline-row-start": "minmax(0, 1fr)", "--timeline-row-end": "minmax(0, 1fr)", "> hr": { height: "100%", width: "calc(0.25rem * 1)", "&:first-child": { "grid-column-start": "2", "grid-row-start": "1" }, "&:last-child": { "grid-column-start": "2", "grid-column-end": "auto", "grid-row-start": "3", "grid-row-end": "none" } } }, ".timeline-start": { "grid-column-start": "1", "grid-column-end": "2", "grid-row-start": "1", "grid-row-end": "4", "align-self": "center", "justify-self": "flex-end" }, ".timeline-end": { "grid-column-start": "3", "grid-column-end": "4", "grid-row-start": "1", "grid-row-end": "4", "align-self": "center", "justify-self": "flex-start" }, "&:has(.timeline-middle)": { "> li": { "> hr": { "&:first-child": { "border-top-left-radius": "0", "border-top-right-radius": "0", "border-bottom-right-radius": "var(--radius-selector)", "border-bottom-left-radius": "var(--radius-selector)" }, "&:last-child": { "border-top-left-radius": "var(--radius-selector)", "border-top-right-radius": "var(--radius-selector)", "border-bottom-right-radius": "0", "border-bottom-left-radius": "0" } } } }, "&:not(:has(.timeline-middle))": { ":first-child": { "> hr:last-child": { "border-top-left-radius": "var(--radius-selector)", "border-top-right-radius": "var(--radius-selector)", "border-bottom-right-radius": "0", "border-bottom-left-radius": "0" } }, ":last-child": { "> hr:first-child": { "border-top-left-radius": "0", "border-top-right-radius": "0", "border-bottom-right-radius": "var(--radius-selector)", "border-bottom-left-radius": "var(--radius-selector)" } } }, "&.timeline-snap-icon": { "> li": { "--timeline-col-start": "minmax(0, 1fr)", "--timeline-row-start": "0.5rem" } } }, ".timeline-horizontal": { "flex-direction": "row", "> li": { "align-items": "center", "> hr": { height: "calc(0.25rem * 1)", width: "100%", "&:first-child": { "grid-column-start": "1", "grid-row-start": "2" }, "&:last-child": { "grid-column-start": "3", "grid-column-end": "none", "grid-row-start": "2", "grid-row-end": "auto" } } }, ".timeline-start": { "grid-column-start": "1", "grid-column-end": "4", "grid-row-start": "1", "grid-row-end": "2", "align-self": "flex-end", "justify-self": "center" }, ".timeline-end": { "grid-column-start": "1", "grid-column-end": "4", "grid-row-start": "3", "grid-row-end": "4", "align-self": "flex-start", "justify-self": "center" }, "&:has(.timeline-middle)": { "> li": { "> hr": { "&:first-child": { "border-start-start-radius": "0", "border-end-start-radius": "0", "border-start-end-radius": "var(--radius-selector)", "border-end-end-radius": "var(--radius-selector)" }, "&:last-child": { "border-start-start-radius": "var(--radius-selector)", "border-end-start-radius": "var(--radius-selector)", "border-start-end-radius": "0", "border-end-end-radius": "0" } } } }, "&:not(:has(.timeline-middle))": { ":first-child": { "> hr:last-child": { "border-start-start-radius": "var(--radius-selector)", "border-end-start-radius": "var(--radius-selector)", "border-start-end-radius": "0", "border-end-end-radius": "0" } }, ":last-child": { "> hr:first-child": { "border-start-start-radius": "0", "border-end-start-radius": "0", "border-start-end-radius": "var(--radius-selector)", "border-end-end-radius": "var(--radius-selector)" } } } } }; + +// packages/daisyui/components/timeline/index.js +var timeline_default = ({ addComponents, prefix = "" }) => { + const prefixedtimeline = 
addPrefix(object_default51, prefix); + addComponents({ ...prefixedtimeline }); +}; + +// packages/daisyui/components/textarea/object.js +var object_default52 = { ".textarea": { border: "var(--border) solid #0000", "min-height": "calc(0.25rem * 20)", "flex-shrink": 1, appearance: "none", "border-radius": "var(--radius-field)", "background-color": "var(--color-base-100)", "padding-block": "calc(0.25rem * 2)", "vertical-align": "middle", width: "clamp(3rem, 20rem, 100%)", "padding-inline-start": "0.75rem", "padding-inline-end": "0.75rem", "font-size": "0.875rem", "border-color": "var(--input-color)", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset", "--input-color": "color-mix(in oklab, var(--color-base-content) 20%, #0000)", textarea: { appearance: "none", "background-color": "transparent", border: "none", "&:focus, &:focus-within": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } } }, "&:focus, &:focus-within": { "--input-color": "var(--color-base-content)", "box-shadow": "0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000)", outline: "2px solid var(--input-color)", "outline-offset": "2px", isolation: "isolate" }, "&:has(> textarea[disabled]), &:is(:disabled, [disabled])": { cursor: "not-allowed", "border-color": "var(--color-base-200)", "background-color": "var(--color-base-200)", color: "color-mix(in oklab, var(--color-base-content) 40%, transparent)", "&::placeholder": { color: "color-mix(in oklab, var(--color-base-content) 20%, transparent)" }, "box-shadow": "none" }, "&:has(> textarea[disabled]) > textarea[disabled]": { cursor: "not-allowed" } }, ".textarea-ghost": { "background-color": "transparent", "box-shadow": "none", "border-color": "#0000", "&:focus, &:focus-within": { "background-color": "var(--color-base-100)", color: "var(--color-base-content)", "border-color": "#0000", "box-shadow": "none" } }, ".textarea-neutral": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-neutral)" } }, ".textarea-primary": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-primary)" } }, ".textarea-secondary": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-secondary)" } }, ".textarea-accent": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-accent)" } }, ".textarea-info": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-info)" } }, ".textarea-success": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-success)" } }, ".textarea-warning": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-warning)" } }, ".textarea-error": { "&, &:focus, &:focus-within": { "--input-color": "var(--color-error)" } }, ".textarea-xs": { "font-size": "0.6875rem" }, ".textarea-sm": { "font-size": "0.75rem" }, ".textarea-md": { "font-size": "0.875rem" }, ".textarea-lg": { "font-size": "1.125rem" }, ".textarea-xl": { "font-size": "1.375rem" } }; + +// packages/daisyui/components/textarea/index.js +var textarea_default = ({ addComponents, prefix = "" }) => { + const prefixedtextarea = addPrefix(object_default52, prefix); + addComponents({ ...prefixedtextarea }); +}; + +// packages/daisyui/components/range/object.js +var object_default53 = { ".range": { appearance: "none", "webkit-appearance": "none", "--range-thumb": "var(--color-base-100)", "--range-thumb-size": "calc(var(--size-selector, 
0.25rem) * 6)", "--range-progress": "currentColor", "--range-fill": "1", "--range-p": "0.25rem", "--range-bg": "color-mix(in oklab, currentColor 10%, #0000)", cursor: "pointer", overflow: "hidden", "background-color": "transparent", "vertical-align": "middle", width: "clamp(3rem, 20rem, 100%)", "--radius-selector-max": `calc( + var(--radius-selector) + var(--radius-selector) + var(--radius-selector) + )`, "border-radius": "calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)))", border: "none", height: "var(--range-thumb-size)", '[dir="rtl"] &': { "--range-dir": "-1" }, "&:focus": { outline: "none" }, "&:focus-visible": { outline: "2px solid", "outline-offset": "2px" }, "&::-webkit-slider-runnable-track": { width: "100%", "background-color": "var(--range-bg)", "border-radius": "var(--radius-selector)", height: "calc(var(--range-thumb-size) * 0.5)" }, "@media (forced-colors: active)": [{ "&::-webkit-slider-runnable-track": { border: "1px solid" } }, { "&::-moz-range-track": { border: "1px solid" } }], "&::-webkit-slider-thumb": { position: "relative", "box-sizing": "border-box", "border-radius": "calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)))", "background-color": "currentColor", height: "var(--range-thumb-size)", width: "var(--range-thumb-size)", border: "var(--range-p) solid", appearance: "none", "webkit-appearance": "none", top: "50%", color: "var(--range-progress)", transform: "translateY(-50%)", "box-shadow": "0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000), 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100rem) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100rem * var(--range-fill))" }, "&::-moz-range-track": { width: "100%", "background-color": "var(--range-bg)", "border-radius": "var(--radius-selector)", height: "calc(var(--range-thumb-size) * 0.5)" }, "&::-moz-range-thumb": { position: "relative", "box-sizing": "border-box", "border-radius": "calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)))", "background-color": "currentColor", height: "var(--range-thumb-size)", width: "var(--range-thumb-size)", border: "var(--range-p) solid", top: "50%", color: "var(--range-progress)", "box-shadow": "0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000), 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100rem) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100rem * var(--range-fill))" }, "&:disabled": { cursor: "not-allowed", opacity: "30%" } }, ".range-primary": { color: "var(--color-primary)", "--range-thumb": "var(--color-primary-content)" }, ".range-secondary": { color: "var(--color-secondary)", "--range-thumb": "var(--color-secondary-content)" }, ".range-accent": { color: "var(--color-accent)", "--range-thumb": "var(--color-accent-content)" }, ".range-neutral": { color: "var(--color-neutral)", "--range-thumb": "var(--color-neutral-content)" }, ".range-success": { color: "var(--color-success)", "--range-thumb": "var(--color-success-content)" }, ".range-warning": { color: "var(--color-warning)", "--range-thumb": "var(--color-warning-content)" }, ".range-info": { color: "var(--color-info)", "--range-thumb": "var(--color-info-content)" }, ".range-error": { color: 
"var(--color-error)", "--range-thumb": "var(--color-error-content)" }, ".range-xs": { "--range-thumb-size": "calc(var(--size-selector, 0.25rem) * 4)" }, ".range-sm": { "--range-thumb-size": "calc(var(--size-selector, 0.25rem) * 5)" }, ".range-md": { "--range-thumb-size": "calc(var(--size-selector, 0.25rem) * 6)" }, ".range-lg": { "--range-thumb-size": "calc(var(--size-selector, 0.25rem) * 7)" }, ".range-xl": { "--range-thumb-size": "calc(var(--size-selector, 0.25rem) * 8)" } }; + +// packages/daisyui/components/range/index.js +var range_default = ({ addComponents, prefix = "" }) => { + const prefixedrange = addPrefix(object_default53, prefix); + addComponents({ ...prefixedrange }); +}; + +// packages/daisyui/components/dock/object.js +var object_default54 = { ".dock": { position: "fixed", right: "calc(0.25rem * 0)", bottom: "calc(0.25rem * 0)", left: "calc(0.25rem * 0)", "z-index": 1, display: "flex", width: "100%", "flex-direction": "row", "align-items": "center", "justify-content": "space-around", "background-color": "var(--color-base-100)", padding: "calc(0.25rem * 2)", color: "currentColor", "border-top": "0.5px solid color-mix(in oklab, var(--color-base-content) 5%, #0000)", height: ["4rem", "calc(4rem + env(safe-area-inset-bottom))"], "padding-bottom": "env(safe-area-inset-bottom)", "> *": { position: "relative", "margin-bottom": "calc(0.25rem * 2)", display: "flex", height: "100%", "max-width": "calc(0.25rem * 32)", "flex-shrink": 1, "flex-basis": "100%", cursor: "pointer", "flex-direction": "column", "align-items": "center", "justify-content": "center", gap: "1px", "border-radius": "var(--radius-box)", "background-color": "transparent", transition: "opacity 0.2s ease-out", "@media (hover: hover)": { "&:hover": { opacity: "80%" } }, '&[aria-disabled="true"], &[disabled]': { "&, &:hover": { "pointer-events": "none", color: "color-mix(in oklab, var(--color-base-content) 10%, transparent)", opacity: "100%" } }, ".dock-label": { "font-size": "0.6875rem" }, "&:after": { content: '""', position: "absolute", height: "calc(0.25rem * 1)", width: "calc(0.25rem * 6)", "border-radius": "calc(infinity * 1px)", "background-color": "transparent", bottom: "0.2rem", "border-top": "3px solid transparent", transition: "background-color 0.1s ease-out, text-color 0.1s ease-out, width 0.1s ease-out" } } }, ".dock-active": { "&:after": { width: "calc(0.25rem * 10)", "background-color": "currentColor", color: "currentColor" } }, ".dock-xs": { height: ["3rem", "calc(3rem + env(safe-area-inset-bottom))"], ".dock-active": { "&:after": { bottom: "-0.1rem" } }, ".dock-label": { "font-size": "0.625rem" } }, ".dock-sm": { height: ["calc(0.25rem * 14)", "3.5rem", "calc(3.5rem + env(safe-area-inset-bottom))"], ".dock-active": { "&:after": { bottom: "-0.1rem" } }, ".dock-label": { "font-size": "0.625rem" } }, ".dock-md": { height: ["4rem", "calc(4rem + env(safe-area-inset-bottom))"], ".dock-label": { "font-size": "0.6875rem" } }, ".dock-lg": { height: ["4.5rem", "calc(4.5rem + env(safe-area-inset-bottom))"], ".dock-active": { "&:after": { bottom: "0.4rem" } }, ".dock-label": { "font-size": "0.6875rem" } }, ".dock-xl": { height: ["5rem", "calc(5rem + env(safe-area-inset-bottom))"], ".dock-active": { "&:after": { bottom: "0.4rem" } }, ".dock-label": { "font-size": "0.75rem" } } }; + +// packages/daisyui/components/dock/index.js +var dock_default = ({ addComponents, prefix = "" }) => { + const prefixeddock = addPrefix(object_default54, prefix); + addComponents({ ...prefixeddock }); +}; + +// 
packages/daisyui/components/breadcrumbs/object.js +var object_default55 = { ".breadcrumbs": { "max-width": "100%", "overflow-x": "auto", "padding-block": "calc(0.25rem * 2)", "> menu, > ul, > ol": { display: "flex", "min-height": "min-content", "align-items": "center", "white-space": "nowrap", "> li": { display: "flex", "align-items": "center", "> *": { display: "flex", cursor: "pointer", "align-items": "center", gap: "calc(0.25rem * 2)", "&:hover": { "@media (hover: hover)": { "text-decoration-line": "underline" } }, "&:focus": { "--tw-outline-style": "none", "outline-style": "none", "@media (forced-colors: active)": { outline: "2px solid transparent", "outline-offset": "2px" } }, "&:focus-visible": { outline: "2px solid currentColor", "outline-offset": "2px" } }, "& + *:before": { content: '""', "margin-right": "calc(0.25rem * 3)", "margin-left": "calc(0.25rem * 2)", display: "block", height: "calc(0.25rem * 1.5)", width: "calc(0.25rem * 1.5)", opacity: "40%", rotate: "45deg", "border-top": "1px solid", "border-right": "1px solid", "background-color": "#0000" }, '[dir="rtl"] & + *:before': { rotate: "-135deg" } } } } }; + +// packages/daisyui/components/breadcrumbs/index.js +var breadcrumbs_default = ({ addComponents, prefix = "" }) => { + const prefixedbreadcrumbs = addPrefix(object_default55, prefix); + addComponents({ ...prefixedbreadcrumbs }); +}; + +// packages/daisyui/components/radio/object.js +var object_default56 = { ".radio": { position: "relative", "flex-shrink": 0, cursor: "pointer", appearance: "none", "border-radius": "calc(infinity * 1px)", padding: "calc(0.25rem * 1)", "vertical-align": "middle", border: "var(--border) solid var(--input-color, color-mix(in srgb, currentColor 20%, #0000))", "box-shadow": "0 1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset", "--size": "calc(var(--size-selector, 0.25rem) * 6)", width: "var(--size)", height: "var(--size)", color: "var(--input-color, currentColor)", "&:before": { display: "block", width: "100%", height: "100%", "border-radius": "calc(infinity * 1px)", "--tw-content": '""', content: "var(--tw-content)", "background-size": "auto, calc(var(--noise) * 100%)", "background-image": "none, var(--fx-noise)" }, "&:focus-visible": { outline: "2px solid currentColor" }, '&:checked, &[aria-checked="true"]': { animation: "radio 0.2s ease-out", "border-color": "currentColor", "background-color": "var(--color-base-100)", "&:before": { "background-color": "currentColor", "box-shadow": "0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px oklch(0% 0 0 / calc(var(--depth) * 0.1))" }, "@media (forced-colors: active)": { "&:before": { "outline-style": "var(--tw-outline-style)", "outline-width": "1px", "outline-offset": "calc(1px * -1)" } }, "@media print": { "&:before": { outline: "0.25rem solid", "outline-offset": "-1rem" } } } }, ".radio-primary": { "--input-color": "var(--color-primary)" }, ".radio-secondary": { "--input-color": "var(--color-secondary)" }, ".radio-accent": { "--input-color": "var(--color-accent)" }, ".radio-neutral": { "--input-color": "var(--color-neutral)" }, ".radio-info": { "--input-color": "var(--color-info)" }, ".radio-success": { "--input-color": "var(--color-success)" }, ".radio-warning": { "--input-color": "var(--color-warning)" }, ".radio-error": { "--input-color": "var(--color-error)" }, ".radio:disabled": { cursor: "not-allowed", opacity: "20%" }, ".radio-xs": { padding: "0.125rem", '&:is([type="radio"])': { "--size": "calc(var(--size-selector, 
0.25rem) * 4)" } }, ".radio-sm": { padding: "0.1875rem", '&:is([type="radio"])': { "--size": "calc(var(--size-selector, 0.25rem) * 5)" } }, ".radio-md": { padding: "0.25rem", '&:is([type="radio"])': { "--size": "calc(var(--size-selector, 0.25rem) * 6)" } }, ".radio-lg": { padding: "0.3125rem", '&:is([type="radio"])': { "--size": "calc(var(--size-selector, 0.25rem) * 7)" } }, ".radio-xl": { padding: "0.375rem", '&:is([type="radio"])': { "--size": "calc(var(--size-selector, 0.25rem) * 8)" } }, "@keyframes radio": { "0%": { padding: "5px" }, "50%": { padding: "3px" } } }; + +// packages/daisyui/components/radio/index.js +var radio_default = ({ addComponents, prefix = "" }) => { + const prefixedradio = addPrefix(object_default56, prefix); + addComponents({ ...prefixedradio }); +}; + +// packages/daisyui/components/skeleton/object.js +var object_default57 = { ".skeleton": { "border-radius": "var(--radius-box)", "background-color": "var(--color-base-300)", "@media (prefers-reduced-motion: reduce)": { "transition-duration": "15s" }, "will-change": "background-position", animation: "skeleton 1.8s ease-in-out infinite", "background-image": "linear-gradient( 105deg, #0000 0% 40%, var(--color-base-100) 50%, #0000 60% 100% )", "background-size": "200% auto", "background-repeat": "no-repeat", "background-position-x": "-50%" }, "@keyframes skeleton": { "0%": { "background-position": "150%" }, "100%": { "background-position": "-50%" } } }; + +// packages/daisyui/components/skeleton/index.js +var skeleton_default = ({ addComponents, prefix = "" }) => { + const prefixedskeleton = addPrefix(object_default57, prefix); + addComponents({ ...prefixedskeleton }); +}; + +// packages/daisyui/components/loading/object.js +var object_default58 = { ".loading": { "pointer-events": "none", display: "inline-block", "aspect-ratio": "1 / 1", "background-color": "currentColor", "vertical-align": "middle", width: "calc(var(--size-selector, 0.25rem) * 6)", "mask-size": "100%", "mask-repeat": "no-repeat", "mask-position": "center", "mask-image": `url("data:image/svg+xml,%3Csvg width='24' height='24' stroke='black' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cg transform-origin='center'%3E%3Ccircle cx='12' cy='12' r='9.5' fill='none' stroke-width='3' stroke-linecap='round'%3E%3CanimateTransform attributeName='transform' type='rotate' from='0 12 12' to='360 12 12' dur='2s' repeatCount='indefinite'/%3E%3Canimate attributeName='stroke-dasharray' values='0,150;42,150;42,150' keyTimes='0;0.475;1' dur='1.5s' repeatCount='indefinite'/%3E%3Canimate attributeName='stroke-dashoffset' values='0;-16;-59' keyTimes='0;0.475;1' dur='1.5s' repeatCount='indefinite'/%3E%3C/circle%3E%3C/g%3E%3C/svg%3E")` }, ".loading-spinner": { "mask-image": `url("data:image/svg+xml,%3Csvg width='24' height='24' stroke='black' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cg transform-origin='center'%3E%3Ccircle cx='12' cy='12' r='9.5' fill='none' stroke-width='3' stroke-linecap='round'%3E%3CanimateTransform attributeName='transform' type='rotate' from='0 12 12' to='360 12 12' dur='2s' repeatCount='indefinite'/%3E%3Canimate attributeName='stroke-dasharray' values='0,150;42,150;42,150' keyTimes='0;0.475;1' dur='1.5s' repeatCount='indefinite'/%3E%3Canimate attributeName='stroke-dashoffset' values='0;-16;-59' keyTimes='0;0.475;1' dur='1.5s' repeatCount='indefinite'/%3E%3C/circle%3E%3C/g%3E%3C/svg%3E")` }, ".loading-dots": { "mask-image": `url("data:image/svg+xml,%3Csvg width='24' height='24' viewBox='0 0 24 24' 
xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='4' cy='12' r='3'%3E%3Canimate attributeName='cy' values='12;6;12;12' keyTimes='0;0.286;0.571;1' dur='1.05s' repeatCount='indefinite' keySplines='.33,0,.66,.33;.33,.66,.66,1'/%3E%3C/circle%3E%3Ccircle cx='12' cy='12' r='3'%3E%3Canimate attributeName='cy' values='12;6;12;12' keyTimes='0;0.286;0.571;1' dur='1.05s' repeatCount='indefinite' keySplines='.33,0,.66,.33;.33,.66,.66,1' begin='0.1s'/%3E%3C/circle%3E%3Ccircle cx='20' cy='12' r='3'%3E%3Canimate attributeName='cy' values='12;6;12;12' keyTimes='0;0.286;0.571;1' dur='1.05s' repeatCount='indefinite' keySplines='.33,0,.66,.33;.33,.66,.66,1' begin='0.2s'/%3E%3C/circle%3E%3C/svg%3E")` }, ".loading-ring": { "mask-image": `url("data:image/svg+xml,%3Csvg width='44' height='44' viewBox='0 0 44 44' xmlns='http://www.w3.org/2000/svg' stroke='white'%3E%3Cg fill='none' fill-rule='evenodd' stroke-width='2'%3E%3Ccircle cx='22' cy='22' r='1'%3E%3Canimate attributeName='r' begin='0s' dur='1.8s' values='1;20' calcMode='spline' keyTimes='0;1' keySplines='0.165,0.84,0.44,1' repeatCount='indefinite'/%3E%3Canimate attributeName='stroke-opacity' begin='0s' dur='1.8s' values='1;0' calcMode='spline' keyTimes='0;1' keySplines='0.3,0.61,0.355,1' repeatCount='indefinite'/%3E%3C/circle%3E%3Ccircle cx='22' cy='22' r='1'%3E%3Canimate attributeName='r' begin='-0.9s' dur='1.8s' values='1;20' calcMode='spline' keyTimes='0;1' keySplines='0.165,0.84,0.44,1' repeatCount='indefinite'/%3E%3Canimate attributeName='stroke-opacity' begin='-0.9s' dur='1.8s' values='1;0' calcMode='spline' keyTimes='0;1' keySplines='0.3,0.61,0.355,1' repeatCount='indefinite'/%3E%3C/circle%3E%3C/g%3E%3C/svg%3E")` }, ".loading-ball": { "mask-image": `url("data:image/svg+xml,%3Csvg width='24' height='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cellipse cx='12' cy='5' rx='4' ry='4'%3E%3Canimate attributeName='cy' values='5;20;20.5;20;5' keyTimes='0;0.469;0.5;0.531;1' dur='.8s' repeatCount='indefinite' keySplines='.33,0,.66,.33;.33,.66,.66,1'/%3E%3Canimate attributeName='rx' values='4;4;4.8;4;4' keyTimes='0;0.469;0.5;0.531;1' dur='.8s' repeatCount='indefinite'/%3E%3Canimate attributeName='ry' values='4;4;3;4;4' keyTimes='0;0.469;0.5;0.531;1' dur='.8s' repeatCount='indefinite'/%3E%3C/ellipse%3E%3C/svg%3E")` }, ".loading-bars": { "mask-image": `url("data:image/svg+xml,%3Csvg width='24' height='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Crect x='1' y='1' width='6' height='22'%3E%3Canimate attributeName='y' values='1;5;1' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite'/%3E%3Canimate attributeName='height' values='22;14;22' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite'/%3E%3Canimate attributeName='opacity' values='1;0.2;1' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite'/%3E%3C/rect%3E%3Crect x='9' y='1' width='6' height='22'%3E%3Canimate attributeName='y' values='1;5;1' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite' begin='-0.65s'/%3E%3Canimate attributeName='height' values='22;14;22' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite' begin='-0.65s'/%3E%3Canimate attributeName='opacity' values='1;0.2;1' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite' begin='-0.65s'/%3E%3C/rect%3E%3Crect x='17' y='1' width='6' height='22'%3E%3Canimate attributeName='y' values='1;5;1' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite' begin='-0.5s'/%3E%3Canimate attributeName='height' values='22;14;22' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite' begin='-0.5s'/%3E%3Canimate 
attributeName='opacity' values='1;0.2;1' keyTimes='0;0.938;1' dur='.8s' repeatCount='indefinite' begin='-0.5s'/%3E%3C/rect%3E%3C/svg%3E")` }, ".loading-infinity": { "mask-image": `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' style='shape-rendering:auto;' width='200px' height='200px' viewBox='0 0 100 100' preserveAspectRatio='xMidYMid'%3E%3Cpath fill='none' stroke='black' stroke-width='10' stroke-dasharray='205.271 51.318' d='M24.3 30C11.4 30 5 43.3 5 50s6.4 20 19.3 20c19.3 0 32.1-40 51.4-40C88.6 30 95 43.3 95 50s-6.4 20-19.3 20C56.4 70 43.6 30 24.3 30z' stroke-linecap='round' style='transform:scale(0.8);transform-origin:50px 50px'%3E%3Canimate attributeName='stroke-dashoffset' repeatCount='indefinite' dur='2s' keyTimes='0;1' values='0;256.589'/%3E%3C/path%3E%3C/svg%3E")` }, ".loading-xs": { width: "calc(var(--size-selector, 0.25rem) * 4)" }, ".loading-sm": { width: "calc(var(--size-selector, 0.25rem) * 5)" }, ".loading-md": { width: "calc(var(--size-selector, 0.25rem) * 6)" }, ".loading-lg": { width: "calc(var(--size-selector, 0.25rem) * 7)" }, ".loading-xl": { width: "calc(var(--size-selector, 0.25rem) * 8)" } }; + +// packages/daisyui/components/loading/index.js +var loading_default = ({ addComponents, prefix = "" }) => { + const prefixedloading = addPrefix(object_default58, prefix); + addComponents({ ...prefixedloading }); +}; + +// packages/daisyui/components/validator/object.js +var object_default59 = { ".validator": { "&:user-valid, &:has(:user-valid)": { '&, &:focus, &:checked, &[aria-checked="true"], &:focus-within': { "--input-color": "var(--color-success)" } }, "&:user-invalid, &:has(:user-invalid), &[aria-invalid]": { '&, &:focus, &:checked, &[aria-checked="true"], &:focus-within': { "--input-color": "var(--color-error)" }, "& ~ .validator-hint": { visibility: "visible", display: "block", color: "var(--color-error)" } } }, ".validator-hint": { visibility: "hidden", "margin-top": "calc(0.25rem * 2)", "font-size": "0.75rem" } }; + +// packages/daisyui/components/validator/index.js +var validator_default = ({ addComponents, prefix = "" }) => { + const prefixedvalidator = addPrefix(object_default59, prefix); + addComponents({ ...prefixedvalidator }); +}; + +// packages/daisyui/components/collapse/object.js +var object_default60 = { ".collapse:not(td, tr, colgroup)": { visibility: "visible" }, ".collapse": { position: "relative", display: "grid", overflow: "hidden", "border-radius": "var(--radius-box, 1rem)", width: "100%", "grid-template-rows": "max-content 0fr", transition: "grid-template-rows 0.2s", isolation: "isolate", '> input:is([type="checkbox"], [type="radio"])': { "grid-column-start": "1", "grid-row-start": "1", appearance: "none", opacity: 0, "z-index": 1, width: "100%", padding: "1rem", "padding-inline-end": "3rem", "min-height": "3.75rem", transition: "background-color 0.2s ease-out" }, '&:is([open], :focus:not(.collapse-close)), &:not(.collapse-close):has(> input:is([type="checkbox"], [type="radio"]):checked)': { "grid-template-rows": "max-content 1fr" }, '&:is([open], :focus:not(.collapse-close)) > .collapse-content, &:not(.collapse-close) > :where(input:is([type="checkbox"], [type="radio"]):checked ~ .collapse-content)': { visibility: "visible", "min-height": "fit-content" }, '&:focus-visible, &:has(> input:is([type="checkbox"], [type="radio"]):focus-visible)': { "outline-color": "var(--color-base-content)", "outline-style": "solid", "outline-width": "2px", "outline-offset": "2px" }, 
"&:not(.collapse-close)": { '> input[type="checkbox"], > input[type="radio"]:not(:checked), > .collapse-title': { cursor: "pointer" } }, "&:focus:not(.collapse-close, .collapse[open]) > .collapse-title": { cursor: "unset" }, '&:is([open], :focus:not(.collapse-close)) > :where(.collapse-content), &:not(.collapse-close) > :where(input:is([type="checkbox"], [type="radio"]):checked ~ .collapse-content)': { "padding-bottom": "1rem", transition: "padding 0.2s ease-out, background-color 0.2s ease-out" }, "&:is([open])": { "&.collapse-arrow": { "> .collapse-title:after": { transform: "translateY(-50%) rotate(225deg)" } } }, "&.collapse-open": { "&.collapse-arrow": { "> .collapse-title:after": { transform: "translateY(-50%) rotate(225deg)" } }, "&.collapse-plus": { "> .collapse-title:after": { content: '"−"' } } }, "&.collapse-arrow:focus:not(.collapse-close)": { "> .collapse-title:after": { transform: "translateY(-50%) rotate(225deg)" } }, "&.collapse-arrow:not(.collapse-close)": { '> input:is([type="checkbox"], [type="radio"]):checked ~ .collapse-title:after': { transform: "translateY(-50%) rotate(225deg)" } }, "&[open]": { "&.collapse-plus": { "> .collapse-title:after": { content: '"−"' } } }, "&.collapse-plus:focus:not(.collapse-close)": { "> .collapse-title:after": { content: '"−"' } }, "&.collapse-plus:not(.collapse-close)": { '> input:is([type="checkbox"], [type="radio"]):checked ~ .collapse-title:after': { content: '"−"' } } }, ".collapse-title, .collapse-content": { "grid-column-start": "1", "grid-row-start": "1" }, ".collapse-content": { visibility: "hidden", "grid-column-start": "1", "grid-row-start": "2", "min-height": "0", "padding-left": "1rem", "padding-right": "1rem", cursor: "unset", transition: "visibility 0.2s, padding 0.2s ease-out, background-color 0.2s ease-out" }, ".collapse:is(details)": { width: "100%", "& summary": { position: "relative", display: "block", "&::-webkit-details-marker": { display: "none" } } }, ".collapse:is(details) summary": { outline: "none" }, ".collapse-arrow": { "> .collapse-title:after": { position: "absolute", display: "block", height: "0.5rem", width: "0.5rem", transform: "translateY(-100%) rotate(45deg)", "transition-property": "all", "transition-timing-function": "cubic-bezier(0.4, 0, 0.2, 1)", "transition-duration": "0.2s", top: "1.9rem", "inset-inline-end": "1.4rem", content: '""', "transform-origin": "75% 75%", "box-shadow": "2px 2px", "pointer-events": "none" } }, ".collapse-plus": { "> .collapse-title:after": { position: "absolute", display: "block", height: "0.5rem", width: "0.5rem", "transition-property": "all", "transition-duration": "300ms", "transition-timing-function": "cubic-bezier(0.4, 0, 0.2, 1)", top: "0.9rem", "inset-inline-end": "1.4rem", content: '"+"', "pointer-events": "none" } }, ".collapse-title": { position: "relative", width: "100%", padding: "1rem", "padding-inline-end": "3rem", "min-height": "3.75rem", transition: "background-color 0.2s ease-out" }, ".collapse-open": { "grid-template-rows": "max-content 1fr", "> .collapse-content": { visibility: "visible", "min-height": "fit-content", "padding-bottom": "1rem", transition: "padding 0.2s ease-out, background-color 0.2s ease-out" } } }; + +// packages/daisyui/components/collapse/index.js +var collapse_default = ({ addComponents, prefix = "" }) => { + const prefixedcollapse = addPrefix(object_default60, prefix); + addComponents({ ...prefixedcollapse }); +}; + +// packages/daisyui/components/swap/object.js +var object_default61 = { ".swap": { position: "relative", display: 
"inline-grid", cursor: "pointer", "place-content": "center", "vertical-align": "middle", "webkit-user-select": "none", "user-select": "none", input: { appearance: "none", border: "none" }, "> *": { "grid-column-start": "1", "grid-row-start": "1", "transition-property": "transform, rotate, opacity", "transition-duration": "0.2s", "transition-timing-function": "cubic-bezier(0, 0, 0.2, 1)" }, ".swap-on, .swap-indeterminate, input:indeterminate ~ .swap-on": { opacity: "0%" }, "input:is(:checked, :indeterminate)": { "& ~ .swap-off": { opacity: "0%" } }, "input:checked ~ .swap-on, input:indeterminate ~ .swap-indeterminate": { opacity: "100%", "backface-visibility": "visible" } }, ".swap-active": { ".swap-off": { opacity: "0%" }, ".swap-on": { opacity: "100%" } }, ".swap-rotate": { ".swap-on, input:indeterminate ~ .swap-on": { rotate: "45deg" }, "input:is(:checked, :indeterminate) ~ .swap-on, &.swap-active .swap-on": { rotate: "0deg" }, "input:is(:checked, :indeterminate) ~ .swap-off, &.swap-active .swap-off": { rotate: "calc(45deg * -1)" } }, ".swap-flip": { "transform-style": "preserve-3d", perspective: "20rem", ".swap-on, .swap-indeterminate, input:indeterminate ~ .swap-on": { transform: "rotateY(180deg)", "backface-visibility": "hidden" }, "input:is(:checked, :indeterminate) ~ .swap-on, &.swap-active .swap-on": { transform: "rotateY(0deg)" }, "input:is(:checked, :indeterminate) ~ .swap-off, &.swap-active .swap-off": { transform: "rotateY(-180deg)", "backface-visibility": "hidden", opacity: "100%" } } }; + +// packages/daisyui/components/swap/index.js +var swap_default = ({ addComponents, prefix = "" }) => { + const prefixedswap = addPrefix(object_default61, prefix); + addComponents({ ...prefixedswap }); +}; + +// packages/daisyui/utilities/typography/object.js +var object_default62 = { ":root .prose": { "--tw-prose-body": "color-mix(in oklab, var(--color-base-content) 80%, #0000)", "--tw-prose-headings": "var(--color-base-content)", "--tw-prose-lead": "var(--color-base-content)", "--tw-prose-links": "var(--color-base-content)", "--tw-prose-bold": "var(--color-base-content)", "--tw-prose-counters": "var(--color-base-content)", "--tw-prose-bullets": "color-mix(in oklab, var(--color-base-content) 50%, #0000)", "--tw-prose-hr": "color-mix(in oklab, var(--color-base-content) 20%, #0000)", "--tw-prose-quotes": "var(--color-base-content)", "--tw-prose-quote-borders": "color-mix(in oklab, var(--color-base-content) 20%, #0000)", "--tw-prose-captions": "color-mix(in oklab, var(--color-base-content) 50%, #0000)", "--tw-prose-code": "var(--color-base-content)", "--tw-prose-pre-code": "var(--color-neutral-content)", "--tw-prose-pre-bg": "var(--color-neutral)", "--tw-prose-th-borders": "color-mix(in oklab, var(--color-base-content) 50%, #0000)", "--tw-prose-td-borders": "color-mix(in oklab, var(--color-base-content) 20%, #0000)", "--tw-prose-kbd": "color-mix(in oklab, var(--color-base-content) 80%, #0000)", ":where(code):not(pre > code)": { "background-color": "var(--color-base-200)", "border-radius": "var(--radius-selector)", border: "var(--border) solid var(--color-base-300)", "padding-inline": "0.5em", "font-weight": "inherit", "&:before, &:after": { display: "none" } } } }; + +// packages/daisyui/utilities/typography/index.js +var typography_default = ({ addUtilities, prefix = "" }) => { + const prefixedtypography = addPrefix(object_default62, prefix); + addUtilities({ ...prefixedtypography }); +}; + +// packages/daisyui/utilities/glass/object.js +var object_default63 = { ".glass": { border: "none", 
"backdrop-filter": "blur(var(--glass-blur, 40px))", "background-color": "#0000", "background-image": "linear-gradient( 135deg, oklch(100% 0 0 / var(--glass-opacity, 30%)) 0%, oklch(0% 0 0 / 0%) 100% ), linear-gradient( var(--glass-reflect-degree, 100deg), oklch(100% 0 0 / var(--glass-reflect-opacity, 5%)) 25%, oklch(0% 0 0 / 0%) 25% )", "box-shadow": "0 0 0 1px oklch(100% 0 0 / var(--glass-border-opacity, 20%)) inset, 0 0 0 2px oklch(0% 0 0 / 5%)", "text-shadow": "0 1px oklch(0% 0 0 / var(--glass-text-shadow-opacity, 5%))" } }; + +// packages/daisyui/utilities/glass/index.js +var glass_default = ({ addUtilities, prefix = "" }) => { + const prefixedglass = addPrefix(object_default63, prefix); + addUtilities({ ...prefixedglass }); +}; + +// packages/daisyui/utilities/join/object.js +var object_default64 = { ".join": { display: "inline-flex", "align-items": "stretch", "--join-ss": "0", "--join-se": "0", "--join-es": "0", "--join-ee": "0", ":where(.join-item)": { "border-start-start-radius": "var(--join-ss, 0)", "border-start-end-radius": "var(--join-se, 0)", "border-end-start-radius": "var(--join-es, 0)", "border-end-end-radius": "var(--join-ee, 0)", "*": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" } }, "> .join-item:where(:first-child)": { "--join-ss": "var(--radius-field)", "--join-se": "0", "--join-es": "var(--radius-field)", "--join-ee": "0" }, ":first-child:not(:last-child)": { ":where(.join-item)": { "--join-ss": "var(--radius-field)", "--join-se": "0", "--join-es": "var(--radius-field)", "--join-ee": "0" } }, "> .join-item:where(:last-child)": { "--join-ss": "0", "--join-se": "var(--radius-field)", "--join-es": "0", "--join-ee": "var(--radius-field)" }, ":last-child:not(:first-child)": { ":where(.join-item)": { "--join-ss": "0", "--join-se": "var(--radius-field)", "--join-es": "0", "--join-ee": "var(--radius-field)" } }, "> .join-item:where(:only-child)": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" }, ":only-child": { ":where(.join-item)": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" } } }, ".join-item": { "&:where(*:not(:first-child, :disabled, [disabled], .btn-disabled))": { "margin-inline-start": "calc(var(--border, 1px) * -1)", "margin-block-start": "0" } }, ".join-vertical": { "flex-direction": "column", "> .join-item:first-child": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "0", "--join-ee": "0" }, ":first-child:not(:last-child)": { ".join-item": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "0", "--join-ee": "0" } }, "> .join-item:last-child": { "--join-ss": "0", "--join-se": "0", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" }, ":last-child:not(:first-child)": { ".join-item": { "--join-ss": "0", "--join-se": "0", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" } }, "> .join-item:only-child": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" }, ":only-child": { ".join-item": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" } }, ".join-item": { 
"&:where(*:not(:first-child))": { "margin-inline-start": "0", "margin-block-start": "calc(var(--border, 1px) * -1)" } } }, ".join-horizontal": { "flex-direction": "row", "> .join-item:first-child": { "--join-ss": "var(--radius-field)", "--join-se": "0", "--join-es": "var(--radius-field)", "--join-ee": "0" }, ":first-child:not(:last-child)": { ".join-item": { "--join-ss": "var(--radius-field)", "--join-se": "0", "--join-es": "var(--radius-field)", "--join-ee": "0" } }, "> .join-item:last-child": { "--join-ss": "0", "--join-se": "var(--radius-field)", "--join-es": "0", "--join-ee": "var(--radius-field)" }, ":last-child:not(:first-child)": { ".join-item": { "--join-ss": "0", "--join-se": "var(--radius-field)", "--join-es": "0", "--join-ee": "var(--radius-field)" } }, "> .join-item:only-child": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" }, ":only-child": { ".join-item": { "--join-ss": "var(--radius-field)", "--join-se": "var(--radius-field)", "--join-es": "var(--radius-field)", "--join-ee": "var(--radius-field)" } }, ".join-item": { "&:where(*:not(:first-child))": { "margin-inline-start": "calc(var(--border, 1px) * -1)", "margin-block-start": "0" } } } }; + +// packages/daisyui/utilities/join/index.js +var join_default = ({ addUtilities, prefix = "" }) => { + const prefixedjoin = addPrefix(object_default64, prefix); + addUtilities({ ...prefixedjoin }); +}; + +// packages/daisyui/utilities/radius/object.js +var object_default65 = { ".rounded-box": { "border-radius": "var(--radius-box)" }, ".rounded-field": { "border-radius": "var(--radius-field)" }, ".rounded-selector": { "border-radius": "var(--radius-selector)" }, ".rounded-t-box": { "border-top-left-radius": "var(--radius-box)", "border-top-right-radius": "var(--radius-box)" }, ".rounded-b-box": { "border-bottom-left-radius": "var(--radius-box)", "border-bottom-right-radius": "var(--radius-box)" }, ".rounded-l-box": { "border-top-left-radius": "var(--radius-box)", "border-bottom-left-radius": "var(--radius-box)" }, ".rounded-r-box": { "border-top-right-radius": "var(--radius-box)", "border-bottom-right-radius": "var(--radius-box)" }, ".rounded-tl-box": { "border-top-left-radius": "var(--radius-box)" }, ".rounded-tr-box": { "border-top-right-radius": "var(--radius-box)" }, ".rounded-br-box": { "border-bottom-right-radius": "var(--radius-box)" }, ".rounded-bl-box": { "border-bottom-left-radius": "var(--radius-box)" }, ".rounded-t-field": { "border-top-left-radius": "var(--radius-field)", "border-top-right-radius": "var(--radius-field)" }, ".rounded-b-field": { "border-bottom-left-radius": "var(--radius-field)", "border-bottom-right-radius": "var(--radius-field)" }, ".rounded-l-field": { "border-top-left-radius": "var(--radius-field)", "border-bottom-left-radius": "var(--radius-field)" }, ".rounded-r-field": { "border-top-right-radius": "var(--radius-field)", "border-bottom-right-radius": "var(--radius-field)" }, ".rounded-tl-field": { "border-top-left-radius": "var(--radius-field)" }, ".rounded-tr-field": { "border-top-right-radius": "var(--radius-field)" }, ".rounded-br-field": { "border-bottom-right-radius": "var(--radius-field)" }, ".rounded-bl-field": { "border-bottom-left-radius": "var(--radius-field)" }, ".rounded-t-selector": { "border-top-left-radius": "var(--radius-selector)", "border-top-right-radius": "var(--radius-selector)" }, ".rounded-b-selector": { "border-bottom-left-radius": "var(--radius-selector)", 
"border-bottom-right-radius": "var(--radius-selector)" }, ".rounded-l-selector": { "border-top-left-radius": "var(--radius-selector)", "border-bottom-left-radius": "var(--radius-selector)" }, ".rounded-r-selector": { "border-top-right-radius": "var(--radius-selector)", "border-bottom-right-radius": "var(--radius-selector)" }, ".rounded-tl-selector": { "border-top-left-radius": "var(--radius-selector)" }, ".rounded-tr-selector": { "border-top-right-radius": "var(--radius-selector)" }, ".rounded-br-selector": { "border-bottom-right-radius": "var(--radius-selector)" }, ".rounded-bl-selector": { "border-bottom-left-radius": "var(--radius-selector)" } }; + +// packages/daisyui/utilities/radius/index.js +var radius_default = ({ addUtilities, prefix = "" }) => { + const prefixedradius = addPrefix(object_default65, prefix); + addUtilities({ ...prefixedradius }); +}; + +// packages/daisyui/imports.js +var base = { rootscrolllock: rootscrolllock_default, rootcolor: rootcolor_default, scrollbar: scrollbar_default, properties: properties_default, rootscrollgutter: rootscrollgutter_default, svg: svg_default }; +var components = { drawer: drawer_default, link: link_default, stat: stat_default, carousel: carousel_default, divider: divider_default, mask: mask_default, fieldset: fieldset_default, dropdown: dropdown_default, card: card_default, steps: steps_default, alert: alert_default, kbd: kbd_default, select: select_default, progress: progress_default, fileinput: fileinput_default, modal: modal_default, footer: footer_default, table: table_default, avatar: avatar_default, input: input_default, checkbox: checkbox_default, badge: badge_default, status: status_default, diff: diff_default, hero: hero_default, toggle: toggle_default, stack: stack_default, navbar: navbar_default, label: label_default, menu: menu_default, toast: toast_default, button: button_default, list: list_default, mockup: mockup_default, calendar: calendar_default, indicator: indicator_default, rating: rating_default, tab: tab_default, filter: filter_default, chat: chat_default, radialprogress: radialprogress_default, countdown: countdown_default, tooltip: tooltip_default, timeline: timeline_default, textarea: textarea_default, range: range_default, dock: dock_default, breadcrumbs: breadcrumbs_default, radio: radio_default, skeleton: skeleton_default, loading: loading_default, validator: validator_default, collapse: collapse_default, swap: swap_default }; +var utilities = { typography: typography_default, glass: glass_default, join: join_default, radius: radius_default }; + +// packages/daisyui/index.js +var version = "5.0.35"; +var daisyui_default = plugin.withOptions((options) => { + return ({ addBase, addComponents, addUtilities }) => { + const { + include, + exclude, + prefix = "" + } = pluginOptionsHandler(options, addBase, object_default, version); + const shouldIncludeItem = (name) => { + if (include && exclude) { + return include.includes(name) && !exclude.includes(name); + } + if (include) { + return include.includes(name); + } + if (exclude) { + return !exclude.includes(name); + } + return true; + }; + Object.entries(base).forEach(([name, item]) => { + if (!shouldIncludeItem(name)) + return; + item({ addBase, prefix }); + }); + Object.entries(components).forEach(([name, item]) => { + if (!shouldIncludeItem(name)) + return; + item({ addComponents, prefix }); + }); + Object.entries(utilities).forEach(([name, item]) => { + if (!shouldIncludeItem(name)) + return; + item({ addUtilities, prefix }); + }); + }; +}, () => ({ + theme: { 
+ extend: variables_default + } +})); + + +/* + + MIT License + + Copyright (c) 2020 Pouya Saadeghi – https://daisyui.com + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +*/ diff --git a/web-ng/assets/vendor/heroicons.js b/web-ng/assets/vendor/heroicons.js new file mode 100644 index 000000000..296f80e48 --- /dev/null +++ b/web-ng/assets/vendor/heroicons.js @@ -0,0 +1,43 @@ +const plugin = require("tailwindcss/plugin") +const fs = require("fs") +const path = require("path") + +module.exports = plugin(function({matchComponents, theme}) { + let iconsDir = path.join(__dirname, "../../deps/heroicons/optimized") + let values = {} + let icons = [ + ["", "/24/outline"], + ["-solid", "/24/solid"], + ["-mini", "/20/solid"], + ["-micro", "/16/solid"] + ] + icons.forEach(([suffix, dir]) => { + fs.readdirSync(path.join(iconsDir, dir)).forEach(file => { + let name = path.basename(file, ".svg") + suffix + values[name] = {name, fullPath: path.join(iconsDir, dir, file)} + }) + }) + matchComponents({ + "hero": ({name, fullPath}) => { + let content = fs.readFileSync(fullPath).toString().replace(/\r?\n|\r/g, "") + content = encodeURIComponent(content) + let size = theme("spacing.6") + if (name.endsWith("-mini")) { + size = theme("spacing.5") + } else if (name.endsWith("-micro")) { + size = theme("spacing.4") + } + return { + [`--hero-${name}`]: `url('data:image/svg+xml;utf8,${content}')`, + "-webkit-mask": `var(--hero-${name})`, + "mask": `var(--hero-${name})`, + "mask-repeat": "no-repeat", + "background-color": "currentColor", + "vertical-align": "middle", + "display": "inline-block", + "width": size, + "height": size + } + } + }, {values}) +}) diff --git a/web-ng/assets/vendor/topbar.js b/web-ng/assets/vendor/topbar.js new file mode 100644 index 000000000..0552337c2 --- /dev/null +++ b/web-ng/assets/vendor/topbar.js @@ -0,0 +1,138 @@ +/** + * @license MIT + * topbar 3.0.0 + * http://buunguyen.github.io/topbar + * Copyright (c) 2024 Buu Nguyen + */ +(function (window, document) { + "use strict"; + + var canvas, + currentProgress, + showing, + progressTimerId = null, + fadeTimerId = null, + delayTimerId = null, + addEvent = function (elem, type, handler) { + if (elem.addEventListener) elem.addEventListener(type, handler, false); + else if (elem.attachEvent) elem.attachEvent("on" + type, handler); + else elem["on" + type] = handler; + }, + options = { + autoRun: true, + barThickness: 3, + barColors: { + 0: "rgba(26, 188, 156, .9)", + ".25": "rgba(52, 152, 219, .9)", + ".50": "rgba(241, 196, 15, .9)", + ".75": 
"rgba(230, 126, 34, .9)", + "1.0": "rgba(211, 84, 0, .9)", + }, + shadowBlur: 10, + shadowColor: "rgba(0, 0, 0, .6)", + className: null, + }, + repaint = function () { + canvas.width = window.innerWidth; + canvas.height = options.barThickness * 5; // need space for shadow + + var ctx = canvas.getContext("2d"); + ctx.shadowBlur = options.shadowBlur; + ctx.shadowColor = options.shadowColor; + + var lineGradient = ctx.createLinearGradient(0, 0, canvas.width, 0); + for (var stop in options.barColors) + lineGradient.addColorStop(stop, options.barColors[stop]); + ctx.lineWidth = options.barThickness; + ctx.beginPath(); + ctx.moveTo(0, options.barThickness / 2); + ctx.lineTo( + Math.ceil(currentProgress * canvas.width), + options.barThickness / 2 + ); + ctx.strokeStyle = lineGradient; + ctx.stroke(); + }, + createCanvas = function () { + canvas = document.createElement("canvas"); + var style = canvas.style; + style.position = "fixed"; + style.top = style.left = style.right = style.margin = style.padding = 0; + style.zIndex = 100001; + style.display = "none"; + if (options.className) canvas.classList.add(options.className); + addEvent(window, "resize", repaint); + }, + topbar = { + config: function (opts) { + for (var key in opts) + if (options.hasOwnProperty(key)) options[key] = opts[key]; + }, + show: function (delay) { + if (showing) return; + if (delay) { + if (delayTimerId) return; + delayTimerId = setTimeout(() => topbar.show(), delay); + } else { + showing = true; + if (fadeTimerId !== null) window.cancelAnimationFrame(fadeTimerId); + if (!canvas) createCanvas(); + if (!canvas.parentElement) document.body.appendChild(canvas); + canvas.style.opacity = 1; + canvas.style.display = "block"; + topbar.progress(0); + if (options.autoRun) { + (function loop() { + progressTimerId = window.requestAnimationFrame(loop); + topbar.progress( + "+" + 0.05 * Math.pow(1 - Math.sqrt(currentProgress), 2) + ); + })(); + } + } + }, + progress: function (to) { + if (typeof to === "undefined") return currentProgress; + if (typeof to === "string") { + to = + (to.indexOf("+") >= 0 || to.indexOf("-") >= 0 + ? currentProgress + : 0) + parseFloat(to); + } + currentProgress = to > 1 ? 1 : to; + repaint(); + return currentProgress; + }, + hide: function () { + clearTimeout(delayTimerId); + delayTimerId = null; + if (!showing) return; + showing = false; + if (progressTimerId != null) { + window.cancelAnimationFrame(progressTimerId); + progressTimerId = null; + } + (function loop() { + if (topbar.progress("+.1") >= 1) { + canvas.style.opacity -= 0.05; + if (canvas.style.opacity <= 0.05) { + canvas.style.display = "none"; + fadeTimerId = null; + return; + } + } + fadeTimerId = window.requestAnimationFrame(loop); + })(); + }, + }; + + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = topbar; + } else if (typeof define === "function" && define.amd) { + define(function () { + return topbar; + }); + } else { + this.topbar = topbar; + } +}.call(this, window, document)); diff --git a/web-ng/config/config.exs b/web-ng/config/config.exs new file mode 100644 index 000000000..3baebaf09 --- /dev/null +++ b/web-ng/config/config.exs @@ -0,0 +1,83 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Config module. +# +# This configuration file is loaded before any dependency and +# is restricted to this project. 
+ +# General application configuration +import Config + +config :serviceradar_web_ng, :scopes, + user: [ + default: true, + module: ServiceRadarWebNG.Accounts.Scope, + assign_key: :current_scope, + access_path: [:user, :id], + schema_key: :user_id, + schema_type: :id, + schema_table: :ng_users, + test_data_fixture: ServiceRadarWebNG.AccountsFixtures, + test_setup_helper: :register_and_log_in_user + ] + +config :serviceradar_web_ng, + namespace: ServiceRadarWebNG, + ecto_repos: [ServiceRadarWebNG.Repo], + generators: [timestamp_type: :utc_datetime] + +config :serviceradar_web_ng, ServiceRadarWebNG.Repo, migration_source: "ng_schema_migrations" + +config :serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL + +# Configure the endpoint +config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + url: [host: "localhost"], + adapter: Bandit.PhoenixAdapter, + render_errors: [ + formats: [html: ServiceRadarWebNGWeb.ErrorHTML, json: ServiceRadarWebNGWeb.ErrorJSON], + layout: false + ], + pubsub_server: ServiceRadarWebNG.PubSub, + live_view: [signing_salt: "3bWAu579"] + +# Configure the mailer +# +# By default it uses the "Local" adapter which stores the emails +# locally. You can see the emails in your browser, at "/dev/mailbox". +# +# For production it's recommended to configure a different adapter +# at the `config/runtime.exs`. +config :serviceradar_web_ng, ServiceRadarWebNG.Mailer, adapter: Swoosh.Adapters.Local + +# Configure esbuild (the version is required) +config :esbuild, + version: "0.25.4", + serviceradar_web_ng: [ + args: + ~w(js/app.js --bundle --target=es2022 --outdir=../priv/static/assets/js --external:/fonts/* --external:/images/* --alias:@=.), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => [Path.expand("../deps", __DIR__), Mix.Project.build_path()]} + ] + +# Configure tailwind (the version is required) +config :tailwind, + version: "4.1.12", + serviceradar_web_ng: [ + args: ~w( + --input=assets/css/app.css + --output=priv/static/assets/css/app.css + ), + cd: Path.expand("..", __DIR__) + ] + +# Configure Elixir's Logger +config :logger, :default_formatter, + format: "$time $metadata[$level] $message\n", + metadata: [:request_id] + +# Use Jason for JSON parsing in Phoenix +config :phoenix, :json_library, Jason + +# Import environment specific config. This must remain at the bottom +# of this file so it overrides the configuration defined above. 
+import_config "#{config_env()}.exs" diff --git a/web-ng/config/dev.exs b/web-ng/config/dev.exs new file mode 100644 index 000000000..742c9c91e --- /dev/null +++ b/web-ng/config/dev.exs @@ -0,0 +1,154 @@ +import Config + +# Configure your database +cnpg_ssl_mode = System.get_env("CNPG_SSL_MODE", "disable") +cnpg_ssl_enabled = cnpg_ssl_mode != "disable" +cnpg_hostname = System.get_env("CNPG_HOST", "localhost") +cnpg_tls_server_name = System.get_env("CNPG_TLS_SERVER_NAME", cnpg_hostname) + +cnpg_cert_dir = System.get_env("CNPG_CERT_DIR", "") + +cnpg_ca_file = + System.get_env( + "CNPG_CA_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "root.pem"), else: "") + ) + +cnpg_cert_file = + System.get_env( + "CNPG_CERT_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "workstation.pem"), else: "") + ) + +cnpg_key_file = + System.get_env( + "CNPG_KEY_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "workstation-key.pem"), else: "") + ) + +cnpg_verify_peer = cnpg_ssl_mode in ~w(verify-ca verify-full) + +cnpg_ssl_opts = + [verify: if(cnpg_verify_peer, do: :verify_peer, else: :verify_none)] + |> then(fn opts -> + if cnpg_verify_peer and cnpg_ca_file != "" do + Keyword.put(opts, :cacertfile, cnpg_ca_file) + else + opts + end + end) + |> then(fn opts -> + if cnpg_cert_file != "" and cnpg_key_file != "" do + opts + |> Keyword.put(:certfile, cnpg_cert_file) + |> Keyword.put(:keyfile, cnpg_key_file) + else + opts + end + end) + |> then(fn opts -> + if cnpg_ssl_mode == "verify-full" and cnpg_tls_server_name != "" do + opts + |> Keyword.put(:server_name_indication, String.to_charlist(cnpg_tls_server_name)) + |> Keyword.put(:customize_hostname_check, + match_fun: :public_key.pkix_verify_hostname_match_fun(:https) + ) + else + opts + end + end) + +config :serviceradar_web_ng, ServiceRadarWebNG.Repo, + username: System.get_env("CNPG_USERNAME", "postgres"), + password: System.get_env("CNPG_PASSWORD", "postgres"), + hostname: cnpg_hostname, + port: String.to_integer(System.get_env("CNPG_PORT", "5432")), + database: System.get_env("CNPG_DATABASE", "serviceradar_web_ng_dev"), + ssl: if(cnpg_ssl_enabled, do: cnpg_ssl_opts, else: false), + stacktrace: true, + show_sensitive_data_on_connection_error: true, + pool_size: 10 + +# For development, we disable any cache and enable +# debugging and code reloading. +# +# The watchers configuration can be used to run external +# watchers to your application. For example, we can use it +# to bundle .js and .css sources. +config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + # Binding to loopback ipv4 address prevents access from other machines. + # Change to `ip: {0, 0, 0, 0}` to allow access from other machines. + http: [ip: {127, 0, 0, 1}], + check_origin: false, + code_reloader: true, + debug_errors: true, + secret_key_base: + System.get_env("DEV_SECRET_KEY_BASE") || + System.get_env("SECRET_KEY_BASE") || + Base.encode64(:crypto.strong_rand_bytes(48)), + watchers: [ + esbuild: {Esbuild, :install_and_run, [:serviceradar_web_ng, ~w(--sourcemap=inline --watch)]}, + tailwind: {Tailwind, :install_and_run, [:serviceradar_web_ng, ~w(--watch)]} + ] + +# ## SSL Support +# +# In order to use HTTPS in development, a self-signed +# certificate can be generated by running the following +# Mix task: +# +# mix phx.gen.cert +# +# Run `mix help phx.gen.cert` for more information. 
+# +# The `http:` config above can be replaced with: +# +# https: [ +# port: 4001, +# cipher_suite: :strong, +# keyfile: "priv/cert/selfsigned_key.pem", +# certfile: "priv/cert/selfsigned.pem" +# ], +# +# If desired, both `http:` and `https:` keys can be +# configured to run both http and https servers on +# different ports. + +# Reload browser tabs when matching files change. +config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + live_reload: [ + web_console_logger: true, + patterns: [ + # Static assets, except user uploads + ~r"priv/static/(?!uploads/).*\.(js|css|png|jpeg|jpg|gif|svg)$"E, + # Gettext translations + ~r"priv/gettext/.*\.po$"E, + # Router, Controllers, LiveViews and LiveComponents + ~r"lib/serviceradar_web_ng_web/router\.ex$"E, + ~r"lib/serviceradar_web_ng_web/(controllers|live|components)/.*\.(ex|heex)$"E + ] + ] + +# Enable dev routes for dashboard and mailbox +config :serviceradar_web_ng, dev_routes: true + +# Do not include metadata nor timestamps in development logs +config :logger, :default_formatter, format: "[$level] $message\n" + +# Set a higher stacktrace during development. Avoid configuring such +# in production as building large stacktraces may be expensive. +config :phoenix, :stacktrace_depth, 20 + +# Initialize plugs at runtime for faster development compilation +config :phoenix, :plug_init_mode, :runtime + +config :phoenix_live_view, + # Include debug annotations and locations in rendered markup. + # Changing this configuration will require mix clean and a full recompile. + debug_heex_annotations: true, + debug_attributes: true, + # Enable helpful, but potentially expensive runtime checks + enable_expensive_runtime_checks: true + +# Disable swoosh api client as it is only required for production adapters. +config :swoosh, :api_client, false diff --git a/web-ng/config/prod.exs b/web-ng/config/prod.exs new file mode 100644 index 000000000..6f3502003 --- /dev/null +++ b/web-ng/config/prod.exs @@ -0,0 +1,31 @@ +import Config + +# Note we also include the path to a cache manifest +# containing the digested version of static files. This +# manifest is generated by the `mix assets.deploy` task, +# which you should run after static files are built and +# before starting your production server. +config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + cache_static_manifest: "priv/static/cache_manifest.json" + +# Force using SSL in production. This also sets the "strict-security-transport" header, +# known as HSTS. If you have a health check endpoint, you may want to exclude it below. +# Note `:force_ssl` is required to be set at compile-time. +config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + force_ssl: [rewrite_on: [:x_forwarded_proto]], + exclude: [ + # paths: ["/health"], + hosts: ["localhost", "127.0.0.1"] + ] + +# Configure Swoosh API Client +config :swoosh, api_client: Swoosh.ApiClient.Req + +# Disable Swoosh Local Memory Storage +config :swoosh, local: false + +# Do not print debug messages in production +config :logger, level: :info + +# Runtime production configuration, including reading +# of environment variables, is done on config/runtime.exs. diff --git a/web-ng/config/runtime.exs b/web-ng/config/runtime.exs new file mode 100644 index 000000000..9f6b6ebfa --- /dev/null +++ b/web-ng/config/runtime.exs @@ -0,0 +1,190 @@ +import Config + +# config/runtime.exs is executed for all environments, including +# during releases. 
It is executed after compilation and before the +# system starts, so it is typically used to load production configuration +# and secrets from environment variables or elsewhere. Do not define +# any compile-time configuration in here, as it won't be applied. +# The block below contains prod specific runtime configuration. + +# ## Using releases +# +# If you use `mix release`, you need to explicitly enable the server +# by passing the PHX_SERVER=true when you start it: +# +# PHX_SERVER=true bin/serviceradar_web_ng start +# +# Alternatively, you can use `mix phx.gen.release` to generate a `bin/server` +# script that automatically sets the env var above. +if System.get_env("PHX_SERVER") do + config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, server: true +end + +config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + http: [port: String.to_integer(System.get_env("PORT", "4000"))] + +if config_env() == :prod do + database_url = System.get_env("DATABASE_URL") + + cnpg_host = System.get_env("CNPG_HOST") + cnpg_port = String.to_integer(System.get_env("CNPG_PORT", "5432")) + cnpg_database = System.get_env("CNPG_DATABASE", "serviceradar") + cnpg_username = System.get_env("CNPG_USERNAME", "serviceradar") + cnpg_password = System.get_env("CNPG_PASSWORD", "serviceradar") + + cnpg_ssl_mode = System.get_env("CNPG_SSL_MODE", "disable") + cnpg_ssl_enabled = cnpg_ssl_mode != "disable" + cnpg_tls_server_name = System.get_env("CNPG_TLS_SERVER_NAME", cnpg_host || "") + + cnpg_cert_dir = System.get_env("CNPG_CERT_DIR", "") + + cnpg_ca_file = + System.get_env( + "CNPG_CA_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "root.pem"), else: "") + ) + + cnpg_cert_file = + System.get_env( + "CNPG_CERT_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "workstation.pem"), else: "") + ) + + cnpg_key_file = + System.get_env( + "CNPG_KEY_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "workstation-key.pem"), else: "") + ) + + cnpg_verify_peer = cnpg_ssl_mode in ~w(verify-ca verify-full) + + cnpg_ssl_opts = + [verify: if(cnpg_verify_peer, do: :verify_peer, else: :verify_none)] + |> then(fn opts -> + if cnpg_verify_peer and cnpg_ca_file != "" do + Keyword.put(opts, :cacertfile, cnpg_ca_file) + else + opts + end + end) + |> then(fn opts -> + if cnpg_cert_file != "" and cnpg_key_file != "" do + opts + |> Keyword.put(:certfile, cnpg_cert_file) + |> Keyword.put(:keyfile, cnpg_key_file) + else + opts + end + end) + |> then(fn opts -> + if cnpg_ssl_mode == "verify-full" and cnpg_tls_server_name != "" do + opts + |> Keyword.put(:server_name_indication, String.to_charlist(cnpg_tls_server_name)) + |> Keyword.put(:customize_hostname_check, + match_fun: :public_key.pkix_verify_hostname_match_fun(:https) + ) + else + opts + end + end) + + maybe_ipv6 = if System.get_env("ECTO_IPV6") in ~w(true 1), do: [:inet6], else: [] + + repo_url = + cond do + database_url -> + database_url + + cnpg_host -> + "ecto://#{URI.encode_www_form(cnpg_username)}:#{URI.encode_www_form(cnpg_password)}@#{cnpg_host}:#{cnpg_port}/#{cnpg_database}" + + true -> + raise """ + environment variable DATABASE_URL is missing. + For example: ecto://USER:PASS@HOST/DATABASE + """ + end + + config :serviceradar_web_ng, ServiceRadarWebNG.Repo, + url: repo_url, + ssl: if(cnpg_ssl_enabled, do: cnpg_ssl_opts, else: false), + pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10"), + socket_options: maybe_ipv6 + + # The secret key base is used to sign/encrypt cookies and other secrets. 
+ # A default value is used in config/dev.exs and config/test.exs but you + # want to use a different value for prod and you most likely don't want + # to check this value into version control, so we use an environment + # variable instead. + secret_key_base = + System.get_env("SECRET_KEY_BASE") || + raise """ + environment variable SECRET_KEY_BASE is missing. + You can generate one by calling: mix phx.gen.secret + """ + + host = System.get_env("PHX_HOST") || "example.com" + + config :serviceradar_web_ng, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY") + + config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + url: [host: host, port: 443, scheme: "https"], + http: [ + # Enable IPv6 and bind on all interfaces. + # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access. + # See the documentation on https://hexdocs.pm/bandit/Bandit.html#t:options/0 + # for details about using IPv6 vs IPv4 and loopback vs public addresses. + ip: {0, 0, 0, 0, 0, 0, 0, 0} + ], + secret_key_base: secret_key_base + + # ## SSL Support + # + # To get SSL working, you will need to add the `https` key + # to your endpoint configuration: + # + # config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + # https: [ + # ..., + # port: 443, + # cipher_suite: :strong, + # keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"), + # certfile: System.get_env("SOME_APP_SSL_CERT_PATH") + # ] + # + # The `cipher_suite` is set to `:strong` to support only the + # latest and more secure SSL ciphers. This means old browsers + # and clients may not be supported. You can set it to + # `:compatible` for wider support. + # + # `:keyfile` and `:certfile` expect an absolute path to the key + # and cert in disk or a relative path inside priv, for example + # "priv/ssl/server.key". For all supported SSL configuration + # options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1 + # + # We also recommend setting `force_ssl` in your config/prod.exs, + # ensuring no data is ever sent via http, always redirecting to https: + # + # config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + # force_ssl: [hsts: true] + # + # Check `Plug.SSL` for all available options in `force_ssl`. + + # ## Configuring the mailer + # + # In production you need to configure the mailer to use a different adapter. + # Here is an example configuration for Mailgun: + # + # config :serviceradar_web_ng, ServiceRadarWebNG.Mailer, + # adapter: Swoosh.Adapters.Mailgun, + # api_key: System.get_env("MAILGUN_API_KEY"), + # domain: System.get_env("MAILGUN_DOMAIN") + # + # Most non-SMTP adapters require an API client. Swoosh supports Req, Hackney, + # and Finch out-of-the-box. This configuration is typically done at + # compile-time in your config/prod.exs: + # + # config :swoosh, :api_client, Swoosh.ApiClient.Req + # + # See https://hexdocs.pm/swoosh/Swoosh.html#module-installation for details. +end diff --git a/web-ng/config/test.exs b/web-ng/config/test.exs new file mode 100644 index 000000000..90f680dda --- /dev/null +++ b/web-ng/config/test.exs @@ -0,0 +1,105 @@ +import Config + +# Only in tests, remove the complexity from the password hashing algorithm +config :bcrypt_elixir, :log_rounds, 1 + +# Configure your database +# +# The MIX_TEST_PARTITION environment variable can be used +# to provide built-in test partitioning in CI environment. +# Run `mix help test` for more information. 
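To make the test-partitioning comment above concrete: the Repo configuration below appends `MIX_TEST_PARTITION` to the database name, so each partition in a CI run gets its own database. A minimal sketch of that derivation, assuming the default `serviceradar` database name from this file:

```elixir
# Sketch of the test database name derivation used below.
# With CNPG_DATABASE/TEST_CNPG_DATABASE unset and MIX_TEST_PARTITION=3,
# this resolves to "serviceradar3"; with no partition set it stays "serviceradar".
base = System.get_env("TEST_CNPG_DATABASE", System.get_env("CNPG_DATABASE", "serviceradar"))
database = base <> (System.get_env("MIX_TEST_PARTITION") || "")
```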
+cnpg_ssl_mode = System.get_env("CNPG_SSL_MODE", "disable") +cnpg_ssl_enabled = cnpg_ssl_mode != "disable" +cnpg_hostname = System.get_env("CNPG_HOST", "localhost") +cnpg_tls_server_name = System.get_env("CNPG_TLS_SERVER_NAME", cnpg_hostname) + +cnpg_cert_dir = System.get_env("CNPG_CERT_DIR", "") + +cnpg_ca_file = + System.get_env( + "CNPG_CA_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "root.pem"), else: "") + ) + +cnpg_cert_file = + System.get_env( + "CNPG_CERT_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "workstation.pem"), else: "") + ) + +cnpg_key_file = + System.get_env( + "CNPG_KEY_FILE", + if(cnpg_cert_dir != "", do: Path.join(cnpg_cert_dir, "workstation-key.pem"), else: "") + ) + +cnpg_verify_peer = cnpg_ssl_mode in ~w(verify-ca verify-full) + +cnpg_ssl_opts = + [verify: if(cnpg_verify_peer, do: :verify_peer, else: :verify_none)] + |> then(fn opts -> + if cnpg_verify_peer and cnpg_ca_file != "" do + Keyword.put(opts, :cacertfile, cnpg_ca_file) + else + opts + end + end) + |> then(fn opts -> + if cnpg_cert_file != "" and cnpg_key_file != "" do + opts + |> Keyword.put(:certfile, cnpg_cert_file) + |> Keyword.put(:keyfile, cnpg_key_file) + else + opts + end + end) + |> then(fn opts -> + if cnpg_ssl_mode == "verify-full" and cnpg_tls_server_name != "" do + opts + |> Keyword.put(:server_name_indication, String.to_charlist(cnpg_tls_server_name)) + |> Keyword.put(:customize_hostname_check, + match_fun: :public_key.pkix_verify_hostname_match_fun(:https) + ) + else + opts + end + end) + +config :serviceradar_web_ng, ServiceRadarWebNG.Repo, + username: System.get_env("TEST_CNPG_USERNAME", System.get_env("CNPG_USERNAME", "postgres")), + password: System.get_env("TEST_CNPG_PASSWORD", System.get_env("CNPG_PASSWORD", "postgres")), + hostname: System.get_env("TEST_CNPG_HOST", System.get_env("CNPG_HOST", "localhost")), + port: String.to_integer(System.get_env("TEST_CNPG_PORT", System.get_env("CNPG_PORT", "5432"))), + database: + System.get_env("TEST_CNPG_DATABASE", System.get_env("CNPG_DATABASE", "serviceradar")) <> + (System.get_env("MIX_TEST_PARTITION") || ""), + ssl: if(cnpg_ssl_enabled, do: cnpg_ssl_opts, else: false), + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: System.schedulers_online() * 2 + +# We don't run a server during test. If one is required, +# you can enable the server option below. 
+config :serviceradar_web_ng, ServiceRadarWebNGWeb.Endpoint, + http: [ip: {127, 0, 0, 1}, port: 4002], + secret_key_base: "H8DPohD5rFUqGboVqCKLYXrlyofYUJk6k+XBzKEb5G8LN9brhYpNloE3UgxBQmPW", + server: false + +# In test we don't send emails +config :serviceradar_web_ng, ServiceRadarWebNG.Mailer, adapter: Swoosh.Adapters.Test + +# Disable swoosh api client as it is only required for production adapters +config :swoosh, :api_client, false + +# Print only warnings and errors during test +config :logger, level: :warning + +# Initialize plugs at runtime for faster test compilation +config :phoenix, :plug_init_mode, :runtime + +# Enable helpful, but potentially expensive runtime checks +config :phoenix_live_view, + enable_expensive_runtime_checks: true + +# Sort query params output of verified routes for robust url comparisons +config :phoenix, + sort_verified_routes_query_params: true diff --git a/web-ng/lib/mix/tasks/graph/ready.ex b/web-ng/lib/mix/tasks/graph/ready.ex new file mode 100644 index 000000000..d8d01955e --- /dev/null +++ b/web-ng/lib/mix/tasks/graph/ready.ex @@ -0,0 +1,33 @@ +defmodule Mix.Tasks.Graph.Ready do + @moduledoc """ + Verifies CNPG connectivity and Apache AGE graph readiness. + + Run with: `mix graph.ready` + """ + + use Mix.Task + + alias ServiceRadarWebNG.Graph + + @shortdoc "Verifies AGE graph readiness" + + def run(_args) do + Mix.Task.run("app.start") + + Mix.shell().info("Checking Apache AGE graph readiness...") + + case Graph.query("RETURN 1") do + {:ok, _} -> + Mix.shell().info("✓ AGE is reachable and the `serviceradar` graph is queryable") + + {:error, error} -> + Mix.shell().error("✗ Graph query failed: #{inspect(error)}") + + Mix.shell().error( + "Check CNPG_* env vars and that AGE is installed/enabled on the target database" + ) + + exit({:shutdown, 1}) + end + end +end diff --git a/web-ng/lib/serviceradar_web_ng.ex b/web-ng/lib/serviceradar_web_ng.ex new file mode 100644 index 000000000..1fc79c91a --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng.ex @@ -0,0 +1,9 @@ +defmodule ServiceRadarWebNG do + @moduledoc """ + ServiceRadarWebNG keeps the contexts that define your domain + and business logic. + + Contexts are also responsible for managing your data, regardless + if it comes from the database, an external API or others. + """ +end diff --git a/web-ng/lib/serviceradar_web_ng/accounts.ex b/web-ng/lib/serviceradar_web_ng/accounts.ex new file mode 100644 index 000000000..91fb29275 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/accounts.ex @@ -0,0 +1,297 @@ +defmodule ServiceRadarWebNG.Accounts do + @moduledoc """ + The Accounts context. + """ + + import Ecto.Query, warn: false + alias ServiceRadarWebNG.Repo + + alias ServiceRadarWebNG.Accounts.{User, UserToken, UserNotifier} + + ## Database getters + + @doc """ + Gets a user by email. + + ## Examples + + iex> get_user_by_email("foo@example.com") + %User{} + + iex> get_user_by_email("unknown@example.com") + nil + + """ + def get_user_by_email(email) when is_binary(email) do + Repo.get_by(User, email: email) + end + + @doc """ + Gets a user by email and password. + + ## Examples + + iex> get_user_by_email_and_password("foo@example.com", "correct_password") + %User{} + + iex> get_user_by_email_and_password("foo@example.com", "invalid_password") + nil + + """ + def get_user_by_email_and_password(email, password) + when is_binary(email) and is_binary(password) do + user = Repo.get_by(User, email: email) + if User.valid_password?(user, password), do: user + end + + @doc """ + Gets a single user. 
+ + Raises `Ecto.NoResultsError` if the User does not exist. + + ## Examples + + iex> get_user!(123) + %User{} + + iex> get_user!(456) + ** (Ecto.NoResultsError) + + """ + def get_user!(id), do: Repo.get!(User, id) + + ## User registration + + @doc """ + Registers a user. + + ## Examples + + iex> register_user(%{field: value}) + {:ok, %User{}} + + iex> register_user(%{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def register_user(attrs) do + %User{} + |> User.email_changeset(attrs) + |> Repo.insert() + end + + ## Settings + + @doc """ + Checks whether the user is in sudo mode. + + The user is in sudo mode when the last authentication was done no further + than 20 minutes ago. The limit can be given as second argument in minutes. + """ + def sudo_mode?(user, minutes \\ -20) + + def sudo_mode?(%User{authenticated_at: ts}, minutes) when is_struct(ts, DateTime) do + DateTime.after?(ts, DateTime.utc_now() |> DateTime.add(minutes, :minute)) + end + + def sudo_mode?(_user, _minutes), do: false + + @doc """ + Returns an `%Ecto.Changeset{}` for changing the user email. + + See `ServiceRadarWebNG.Accounts.User.email_changeset/3` for a list of supported options. + + ## Examples + + iex> change_user_email(user) + %Ecto.Changeset{data: %User{}} + + """ + def change_user_email(user, attrs \\ %{}, opts \\ []) do + User.email_changeset(user, attrs, opts) + end + + @doc """ + Updates the user email using the given token. + + If the token matches, the user email is updated and the token is deleted. + """ + def update_user_email(user, token) do + context = "change:#{user.email}" + + Repo.transact(fn -> + with {:ok, query} <- UserToken.verify_change_email_token_query(token, context), + %UserToken{sent_to: email} <- Repo.one(query), + {:ok, user} <- Repo.update(User.email_changeset(user, %{email: email})), + {_count, _result} <- + Repo.delete_all(from(UserToken, where: [user_id: ^user.id, context: ^context])) do + {:ok, user} + else + _ -> {:error, :transaction_aborted} + end + end) + end + + @doc """ + Returns an `%Ecto.Changeset{}` for changing the user password. + + See `ServiceRadarWebNG.Accounts.User.password_changeset/3` for a list of supported options. + + ## Examples + + iex> change_user_password(user) + %Ecto.Changeset{data: %User{}} + + """ + def change_user_password(user, attrs \\ %{}, opts \\ []) do + User.password_changeset(user, attrs, opts) + end + + @doc """ + Updates the user password. + + Returns a tuple with the updated user, as well as a list of expired tokens. + + ## Examples + + iex> update_user_password(user, %{password: ...}) + {:ok, {%User{}, [...]}} + + iex> update_user_password(user, %{password: "too short"}) + {:error, %Ecto.Changeset{}} + + """ + def update_user_password(user, attrs) do + user + |> User.password_changeset(attrs) + |> update_user_and_delete_all_tokens() + end + + ## Session + + @doc """ + Generates a session token. + """ + def generate_user_session_token(user) do + {token, user_token} = UserToken.build_session_token(user) + Repo.insert!(user_token) + token + end + + @doc """ + Gets the user with the given signed token. + + If the token is valid `{user, token_inserted_at}` is returned, otherwise `nil` is returned. + """ + def get_user_by_session_token(token) do + {:ok, query} = UserToken.verify_session_token_query(token) + Repo.one(query) + end + + @doc """ + Gets the user with the given magic link token. 
+ """ + def get_user_by_magic_link_token(token) do + with {:ok, query} <- UserToken.verify_magic_link_token_query(token), + {user, _token} <- Repo.one(query) do + user + else + _ -> nil + end + end + + @doc """ + Logs the user in by magic link. + + There are three cases to consider: + + 1. The user has already confirmed their email. They are logged in + and the magic link is expired. + + 2. The user has not confirmed their email and no password is set. + In this case, the user gets confirmed, logged in, and all tokens - + including session ones - are expired. In theory, no other tokens + exist but we delete all of them for best security practices. + + 3. The user has not confirmed their email but a password is set. + This cannot happen in the default implementation but may be the + source of security pitfalls. See the "Mixing magic link and password registration" section of + `mix help phx.gen.auth`. + """ + def login_user_by_magic_link(token) do + {:ok, query} = UserToken.verify_magic_link_token_query(token) + + case Repo.one(query) do + # Prevent session fixation attacks by disallowing magic links for unconfirmed users with password + {%User{confirmed_at: nil, hashed_password: hash}, _token} when not is_nil(hash) -> + raise """ + magic link log in is not allowed for unconfirmed users with a password set! + + This cannot happen with the default implementation, which indicates that you + might have adapted the code to a different use case. Please make sure to read the + "Mixing magic link and password registration" section of `mix help phx.gen.auth`. + """ + + {%User{confirmed_at: nil} = user, _token} -> + user + |> User.confirm_changeset() + |> update_user_and_delete_all_tokens() + + {user, token} -> + Repo.delete!(token) + {:ok, {user, []}} + + nil -> + {:error, :not_found} + end + end + + @doc ~S""" + Delivers the update email instructions to the given user. + + ## Examples + + iex> deliver_user_update_email_instructions(user, current_email, &url(~p"/users/settings/confirm-email/#{&1}")) + {:ok, %{to: ..., body: ...}} + + """ + def deliver_user_update_email_instructions(%User{} = user, current_email, update_email_url_fun) + when is_function(update_email_url_fun, 1) do + {encoded_token, user_token} = UserToken.build_email_token(user, "change:#{current_email}") + + Repo.insert!(user_token) + UserNotifier.deliver_update_email_instructions(user, update_email_url_fun.(encoded_token)) + end + + @doc """ + Delivers the magic link login instructions to the given user. + """ + def deliver_login_instructions(%User{} = user, magic_link_url_fun) + when is_function(magic_link_url_fun, 1) do + {encoded_token, user_token} = UserToken.build_email_token(user, "login") + Repo.insert!(user_token) + UserNotifier.deliver_login_instructions(user, magic_link_url_fun.(encoded_token)) + end + + @doc """ + Deletes the signed token with the given context. 
+ """ + def delete_user_session_token(token) do + Repo.delete_all(from(UserToken, where: [token: ^token, context: "session"])) + :ok + end + + ## Token helper + + defp update_user_and_delete_all_tokens(changeset) do + Repo.transact(fn -> + with {:ok, user} <- Repo.update(changeset) do + tokens_to_expire = Repo.all_by(UserToken, user_id: user.id) + + Repo.delete_all(from(t in UserToken, where: t.id in ^Enum.map(tokens_to_expire, & &1.id))) + + {:ok, {user, tokens_to_expire}} + end + end) + end +end diff --git a/web-ng/lib/serviceradar_web_ng/accounts/scope.ex b/web-ng/lib/serviceradar_web_ng/accounts/scope.ex new file mode 100644 index 000000000..4a80f86f5 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/accounts/scope.ex @@ -0,0 +1,33 @@ +defmodule ServiceRadarWebNG.Accounts.Scope do + @moduledoc """ + Defines the scope of the caller to be used throughout the app. + + The `ServiceRadarWebNG.Accounts.Scope` allows public interfaces to receive + information about the caller, such as if the call is initiated from an + end-user, and if so, which user. Additionally, such a scope can carry fields + such as "super user" or other privileges for use as authorization, or to + ensure specific code paths can only be access for a given scope. + + It is useful for logging as well as for scoping pubsub subscriptions and + broadcasts when a caller subscribes to an interface or performs a particular + action. + + Feel free to extend the fields on this struct to fit the needs of + growing application requirements. + """ + + alias ServiceRadarWebNG.Accounts.User + + defstruct user: nil + + @doc """ + Creates a scope for the given user. + + Returns nil if no user is given. + """ + def for_user(%User{} = user) do + %__MODULE__{user: user} + end + + def for_user(nil), do: nil +end diff --git a/web-ng/lib/serviceradar_web_ng/accounts/user.ex b/web-ng/lib/serviceradar_web_ng/accounts/user.ex new file mode 100644 index 000000000..b7766ae88 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/accounts/user.ex @@ -0,0 +1,136 @@ +defmodule ServiceRadarWebNG.Accounts.User do + use Ecto.Schema + import Ecto.Changeset + + schema "ng_users" do + field :email, :string + field :password, :string, virtual: true, redact: true + field :hashed_password, :string, redact: true + field :confirmed_at, :utc_datetime + field :authenticated_at, :utc_datetime, virtual: true + + timestamps(type: :utc_datetime) + end + + @doc """ + A user changeset for registering or changing the email. + + It requires the email to change otherwise an error is added. + + ## Options + + * `:validate_unique` - Set to false if you don't want to validate the + uniqueness of the email, useful when displaying live validations. + Defaults to `true`. 
+ """ + def email_changeset(user, attrs, opts \\ []) do + user + |> cast(attrs, [:email]) + |> validate_email(opts) + end + + defp validate_email(changeset, opts) do + changeset = + changeset + |> validate_required([:email]) + |> update_change(:email, &(&1 |> String.trim() |> String.downcase())) + |> validate_format(:email, ~r/^[^@,;\s]+@[^@,;\s]+$/, + message: "must have the @ sign and no spaces" + ) + |> validate_length(:email, max: 160) + + if Keyword.get(opts, :validate_unique, true) do + changeset + |> unsafe_validate_unique(:email, ServiceRadarWebNG.Repo) + |> unique_constraint(:email) + |> validate_email_changed() + else + changeset + end + end + + defp validate_email_changed(changeset) do + if get_field(changeset, :email) && get_change(changeset, :email) == nil do + add_error(changeset, :email, "did not change") + else + changeset + end + end + + @doc """ + A user changeset for changing the password. + + It is important to validate the length of the password, as long passwords may + be very expensive to hash for certain algorithms. + + ## Options + + * `:hash_password` - Hashes the password so it can be stored securely + in the database and ensures the password field is cleared to prevent + leaks in the logs. If password hashing is not needed and clearing the + password field is not desired (like when using this changeset for + validations on a LiveView form), this option can be set to `false`. + Defaults to `true`. + """ + def password_changeset(user, attrs, opts \\ []) do + user + |> cast(attrs, [:password]) + |> validate_confirmation(:password, message: "does not match password") + |> validate_password(opts) + end + + defp validate_password(changeset, opts) do + changeset + |> validate_required([:password]) + |> validate_length(:password, min: 12, max: 72) + # Examples of additional password validation: + # |> validate_format(:password, ~r/[a-z]/, message: "at least one lower case character") + # |> validate_format(:password, ~r/[A-Z]/, message: "at least one upper case character") + # |> validate_format(:password, ~r/[!?@#$%^&*_0-9]/, message: "at least one digit or punctuation character") + |> maybe_hash_password(opts) + end + + defp maybe_hash_password(changeset, opts) do + hash_password? = Keyword.get(opts, :hash_password, true) + password = get_change(changeset, :password) + + if hash_password? && password && changeset.valid? do + changeset + # If using Bcrypt, then further validate it is at most 72 bytes long + |> validate_length(:password, max: 72, count: :bytes) + # Hashing could be done with `Ecto.Changeset.prepare_changes/2`, but that + # would keep the database transaction open longer and hurt performance. + |> put_change(:hashed_password, Bcrypt.hash_pwd_salt(password)) + |> delete_change(:password) + else + changeset + end + end + + @doc """ + Confirms the account by setting `confirmed_at`. + """ + def confirm_changeset(user) do + now = DateTime.utc_now(:second) + change(user, confirmed_at: now) + end + + @doc """ + Verifies the password. + + If there is no user or the user doesn't have a password, we call + `Bcrypt.no_user_verify/0` to avoid timing attacks. 
+ """ + def valid_password?( + %ServiceRadarWebNG.Accounts.User{hashed_password: hashed_password}, + password + ) + when is_binary(hashed_password) and byte_size(password) > 0 do + Bcrypt.verify_pass(password, hashed_password) + end + + def valid_password?(_, _) do + Bcrypt.no_user_verify() + false + end +end diff --git a/web-ng/lib/serviceradar_web_ng/accounts/user_notifier.ex b/web-ng/lib/serviceradar_web_ng/accounts/user_notifier.ex new file mode 100644 index 000000000..410d5ca02 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/accounts/user_notifier.ex @@ -0,0 +1,84 @@ +defmodule ServiceRadarWebNG.Accounts.UserNotifier do + import Swoosh.Email + + alias ServiceRadarWebNG.Mailer + alias ServiceRadarWebNG.Accounts.User + + # Delivers the email using the application mailer. + defp deliver(recipient, subject, body) do + email = + new() + |> to(recipient) + |> from({"ServiceRadarWebNG", "contact@example.com"}) + |> subject(subject) + |> text_body(body) + + with {:ok, _metadata} <- Mailer.deliver(email) do + {:ok, email} + end + end + + @doc """ + Deliver instructions to update a user email. + """ + def deliver_update_email_instructions(user, url) do + deliver(user.email, "Update email instructions", """ + + ============================== + + Hi #{user.email}, + + You can change your email by visiting the URL below: + + #{url} + + If you didn't request this change, please ignore this. + + ============================== + """) + end + + @doc """ + Deliver instructions to log in with a magic link. + """ + def deliver_login_instructions(user, url) do + case user do + %User{confirmed_at: nil} -> deliver_confirmation_instructions(user, url) + _ -> deliver_magic_link_instructions(user, url) + end + end + + defp deliver_magic_link_instructions(user, url) do + deliver(user.email, "Log in instructions", """ + + ============================== + + Hi #{user.email}, + + You can log into your account by visiting the URL below: + + #{url} + + If you didn't request this email, please ignore this. + + ============================== + """) + end + + defp deliver_confirmation_instructions(user, url) do + deliver(user.email, "Confirmation instructions", """ + + ============================== + + Hi #{user.email}, + + You can confirm your account by visiting the URL below: + + #{url} + + If you didn't create an account with us, please ignore this. + + ============================== + """) + end +end diff --git a/web-ng/lib/serviceradar_web_ng/accounts/user_token.ex b/web-ng/lib/serviceradar_web_ng/accounts/user_token.ex new file mode 100644 index 000000000..f6cd768cb --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/accounts/user_token.ex @@ -0,0 +1,156 @@ +defmodule ServiceRadarWebNG.Accounts.UserToken do + use Ecto.Schema + import Ecto.Query + alias ServiceRadarWebNG.Accounts.UserToken + + @hash_algorithm :sha256 + @rand_size 32 + + # It is very important to keep the magic link token expiry short, + # since someone with access to the email may take over the account. + @magic_link_validity_in_minutes 15 + @change_email_validity_in_days 7 + @session_validity_in_days 14 + + schema "ng_users_tokens" do + field :token, :binary + field :context, :string + field :sent_to, :string + field :authenticated_at, :utc_datetime + belongs_to :user, ServiceRadarWebNG.Accounts.User + + timestamps(type: :utc_datetime, updated_at: false) + end + + @doc """ + Generates a token that will be stored in a signed place, + such as session or cookie. As they are signed, those + tokens do not need to be hashed. 
+ + The reason why we store session tokens in the database, even + though Phoenix already provides a session cookie, is because + Phoenix's default session cookies are not persisted, they are + simply signed and potentially encrypted. This means they are + valid indefinitely, unless you change the signing/encryption + salt. + + Therefore, storing them allows individual user + sessions to be expired. The token system can also be extended + to store additional data, such as the device used for logging in. + You could then use this information to display all valid sessions + and devices in the UI and allow users to explicitly expire any + session they deem invalid. + """ + def build_session_token(user) do + token = :crypto.strong_rand_bytes(@rand_size) + dt = user.authenticated_at || DateTime.utc_now(:second) + {token, %UserToken{token: token, context: "session", user_id: user.id, authenticated_at: dt}} + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + The query returns the user found by the token, if any, along with the token's creation time. + + The token is valid if it matches the value in the database and it has + not expired (after @session_validity_in_days). + """ + def verify_session_token_query(token) do + query = + from token in by_token_and_context_query(token, "session"), + join: user in assoc(token, :user), + where: token.inserted_at > ago(@session_validity_in_days, "day"), + select: {%{user | authenticated_at: token.authenticated_at}, token.inserted_at} + + {:ok, query} + end + + @doc """ + Builds a token and its hash to be delivered to the user's email. + + The non-hashed token is sent to the user email while the + hashed part is stored in the database. The original token cannot be reconstructed, + which means anyone with read-only access to the database cannot directly use + the token in the application to gain access. Furthermore, if the user changes + their email in the system, the tokens sent to the previous email are no longer + valid. + + Users can easily adapt the existing code to provide other types of delivery methods, + for example, by phone numbers. + """ + def build_email_token(user, context) do + build_hashed_token(user, context, user.email) + end + + defp build_hashed_token(user, context, sent_to) do + token = :crypto.strong_rand_bytes(@rand_size) + hashed_token = :crypto.hash(@hash_algorithm, token) + + {Base.url_encode64(token, padding: false), + %UserToken{ + token: hashed_token, + context: context, + sent_to: sent_to, + user_id: user.id + }} + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + If found, the query returns a tuple of the form `{user, token}`. + + The given token is valid if it matches its hashed counterpart in the + database. This function also checks if the token is being used within + 15 minutes. The context of a magic link token is always "login". + """ + def verify_magic_link_token_query(token) do + case Base.url_decode64(token, padding: false) do + {:ok, decoded_token} -> + hashed_token = :crypto.hash(@hash_algorithm, decoded_token) + + query = + from token in by_token_and_context_query(hashed_token, "login"), + join: user in assoc(token, :user), + where: token.inserted_at > ago(^@magic_link_validity_in_minutes, "minute"), + where: token.sent_to == user.email, + select: {user, token} + + {:ok, query} + + :error -> + :error + end + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. 
+ + The query returns the user_token found by the token, if any. + + This is used to validate requests to change the user + email. + The given token is valid if it matches its hashed counterpart in the + database and if it has not expired (after @change_email_validity_in_days). + The context must always start with "change:". + """ + def verify_change_email_token_query(token, "change:" <> _ = context) do + case Base.url_decode64(token, padding: false) do + {:ok, decoded_token} -> + hashed_token = :crypto.hash(@hash_algorithm, decoded_token) + + query = + from token in by_token_and_context_query(hashed_token, context), + where: token.inserted_at > ago(@change_email_validity_in_days, "day") + + {:ok, query} + + :error -> + :error + end + end + + defp by_token_and_context_query(token, context) do + from UserToken, where: [token: ^token, context: ^context] + end +end diff --git a/web-ng/lib/serviceradar_web_ng/api/device_controller.ex b/web-ng/lib/serviceradar_web_ng/api/device_controller.ex new file mode 100644 index 000000000..82b45923d --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/api/device_controller.ex @@ -0,0 +1,282 @@ +defmodule ServiceRadarWebNG.Api.DeviceController do + use ServiceRadarWebNGWeb, :controller + + import Ecto.Query, only: [from: 2, dynamic: 2] + + alias ServiceRadarWebNG.Inventory.Device + alias ServiceRadarWebNG.Repo + + @default_limit 100 + @max_limit 500 + @max_offset 100_000 + + def index(conn, params) do + with {:ok, opts} <- parse_index_params(params) do + devices = list_devices(opts) + + json(conn, %{ + "data" => Enum.map(devices, &device_to_map/1), + "pagination" => build_pagination(devices, opts) + }) + else + {:error, reason} -> + conn + |> put_status(:bad_request) + |> json(%{"error" => reason}) + end + end + + def show(conn, %{"device_id" => device_id}) do + with {:ok, device_id} <- parse_device_id(device_id) do + case Repo.get(Device, device_id) do + %Device{} = device -> + json(conn, %{"data" => device_to_map(device)}) + + nil -> + conn + |> put_status(:not_found) + |> json(%{"error" => "device not found"}) + end + else + {:error, reason} -> + conn + |> put_status(:bad_request) + |> json(%{"error" => reason}) + end + end + + def show(conn, _params) do + conn + |> put_status(:bad_request) + |> json(%{"error" => "missing required path param: device_id"}) + end + + defp list_devices(opts) do + query = from(d in Device, select: d) + query = maybe_apply_search(query, Map.get(opts, :search)) + query = maybe_apply_status(query, Map.get(opts, :status)) + query = maybe_apply_poller_id(query, Map.get(opts, :poller_id)) + query = maybe_apply_device_type(query, Map.get(opts, :device_type)) + + query = + from(d in query, + order_by: [desc: d.last_seen], + limit: ^opts.limit, + offset: ^opts.offset + ) + + Repo.all(query) + end + + defp parse_index_params(params) when is_map(params) do + with {:ok, limit} <- parse_limit(Map.get(params, "limit"), @default_limit), + {:ok, offset} <- parse_offset(params, limit), + {:ok, search} <- parse_optional_string(Map.get(params, "search")), + {:ok, status} <- parse_status(Map.get(params, "status")), + {:ok, poller_id} <- parse_optional_string(Map.get(params, "poller_id")), + {:ok, device_type} <- parse_optional_string(Map.get(params, "device_type")) do + {:ok, + %{ + limit: limit, + offset: offset, + search: search, + status: status, + poller_id: poller_id, + device_type: device_type + }} + end + end + + defp parse_index_params(_), do: {:error, "invalid query params"} + + defp parse_limit(nil, default), do: {:ok, default} + defp 
parse_limit("", default), do: {:ok, default} + + defp parse_limit(limit, _default) when is_integer(limit) and limit > 0 do + {:ok, min(limit, @max_limit)} + end + + defp parse_limit(limit, default) when is_binary(limit) do + case Integer.parse(String.trim(limit)) do + {value, ""} when value > 0 -> parse_limit(value, default) + _ -> {:error, "invalid limit"} + end + end + + defp parse_limit(_limit, _default), do: {:error, "invalid limit"} + + defp parse_offset(params, limit) when is_map(params) and is_integer(limit) do + offset = Map.get(params, "offset") + page = Map.get(params, "page") + + cond do + not is_nil(offset) -> + parse_offset_value(offset) + + not is_nil(page) -> + with {:ok, page} <- parse_page(page) do + parse_offset_value((page - 1) * limit) + end + + true -> + {:ok, 0} + end + end + + defp parse_offset(_params, _limit), do: {:error, "invalid pagination params"} + + defp parse_offset_value(value) when is_integer(value) and value >= 0 do + if value <= @max_offset, do: {:ok, value}, else: {:error, "offset too large"} + end + + defp parse_offset_value(value) when is_binary(value) do + case Integer.parse(String.trim(value)) do + {value, ""} -> parse_offset_value(value) + _ -> {:error, "invalid offset"} + end + end + + defp parse_offset_value(_), do: {:error, "invalid offset"} + + defp parse_page(value) when is_integer(value) and value > 0, do: {:ok, value} + + defp parse_page(value) when is_binary(value) do + case Integer.parse(String.trim(value)) do + {value, ""} when value > 0 -> {:ok, value} + _ -> {:error, "invalid page"} + end + end + + defp parse_page(_), do: {:error, "invalid page"} + + defp parse_optional_string(nil), do: {:ok, nil} + + defp parse_optional_string(value) when is_binary(value) do + value = + value + |> String.trim() + |> String.slice(0, 200) + + if value == "", do: {:ok, nil}, else: {:ok, value} + end + + defp parse_optional_string(value) when is_integer(value), do: {:ok, Integer.to_string(value)} + defp parse_optional_string(value) when is_atom(value), do: {:ok, Atom.to_string(value)} + defp parse_optional_string(_), do: {:error, "invalid string param"} + + defp parse_status(nil), do: {:ok, nil} + defp parse_status(""), do: {:ok, nil} + + defp parse_status(value) when is_binary(value) do + value = value |> String.downcase() |> String.trim() + + case value do + "online" -> {:ok, :online} + "offline" -> {:ok, :offline} + "available" -> {:ok, :online} + "unavailable" -> {:ok, :offline} + other -> {:error, "invalid status: #{other}"} + end + end + + defp parse_status(_), do: {:error, "invalid status"} + + defp parse_device_id(value) when is_binary(value) do + value = String.trim(value) + + cond do + value == "" -> + {:error, "invalid device_id"} + + String.length(value) > 200 -> + {:error, "invalid device_id"} + + Regex.match?(~r/^[A-Za-z0-9][A-Za-z0-9:._-]*$/, value) -> + {:ok, value} + + true -> + {:error, "invalid device_id"} + end + end + + defp parse_device_id(_), do: {:error, "invalid device_id"} + + defp maybe_apply_search(query, nil), do: query + + defp maybe_apply_search(query, search) when is_binary(search) do + like = "%#{escape_like(search)}%" + + where = dynamic([d], ilike(d.hostname, ^like) or ilike(d.ip, ^like) or ilike(d.id, ^like)) + + from(d in query, where: ^where) + end + + defp maybe_apply_search(query, _), do: query + + defp maybe_apply_status(query, nil), do: query + defp maybe_apply_status(query, :online), do: from(d in query, where: d.is_available == true) + defp maybe_apply_status(query, :offline), do: from(d in query, where: 
d.is_available == false) + defp maybe_apply_status(query, _), do: query + + defp maybe_apply_poller_id(query, nil), do: query + + defp maybe_apply_poller_id(query, poller_id) when is_binary(poller_id) do + from(d in query, where: d.poller_id == ^poller_id) + end + + defp maybe_apply_poller_id(query, _), do: query + + defp maybe_apply_device_type(query, nil), do: query + + defp maybe_apply_device_type(query, device_type) when is_binary(device_type) do + from(d in query, where: d.device_type == ^device_type) + end + + defp maybe_apply_device_type(query, _), do: query + + defp escape_like(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("%", "\\%") + |> String.replace("_", "\\_") + end + + defp build_pagination(devices, %{limit: limit, offset: offset}) do + next_offset = if length(devices) >= limit, do: offset + limit, else: nil + + %{ + "limit" => limit, + "offset" => offset, + "next_offset" => next_offset + } + end + + defp device_to_map(%Device{} = device) do + %{ + "device_id" => device.id, + "hostname" => device.hostname, + "ip" => device.ip, + "poller_id" => device.poller_id, + "agent_id" => device.agent_id, + "mac" => device.mac, + "discovery_sources" => device.discovery_sources, + "is_available" => device.is_available, + "first_seen" => normalize_value(device.first_seen), + "last_seen" => normalize_value(device.last_seen), + "metadata" => device.metadata, + "device_type" => device.device_type, + "service_type" => device.service_type, + "service_status" => device.service_status, + "last_heartbeat" => normalize_value(device.last_heartbeat), + "os_info" => device.os_info, + "version_info" => device.version_info, + "updated_at" => normalize_value(device.updated_at) + } + end + + defp normalize_value(%DateTime{} = value), do: DateTime.to_iso8601(value) + defp normalize_value(%NaiveDateTime{} = value), do: NaiveDateTime.to_iso8601(value) + defp normalize_value(%Date{} = value), do: Date.to_iso8601(value) + defp normalize_value(%Time{} = value), do: Time.to_iso8601(value) + defp normalize_value(value), do: value +end diff --git a/web-ng/lib/serviceradar_web_ng/api/query_controller.ex b/web-ng/lib/serviceradar_web_ng/api/query_controller.ex new file mode 100644 index 000000000..db2ec94f4 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/api/query_controller.ex @@ -0,0 +1,19 @@ +defmodule ServiceRadarWebNG.Api.QueryController do + use ServiceRadarWebNGWeb, :controller + + def execute(conn, params) do + case srql_module().query_request(params) do + {:ok, response} -> + json(conn, response) + + {:error, reason} -> + conn + |> put_status(:bad_request) + |> json(%{"error" => to_string(reason)}) + end + end + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end +end diff --git a/web-ng/lib/serviceradar_web_ng/application.ex b/web-ng/lib/serviceradar_web_ng/application.ex new file mode 100644 index 000000000..bcbb0e246 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/application.ex @@ -0,0 +1,35 @@ +defmodule ServiceRadarWebNG.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + @impl true + def start(_type, _args) do + children = [ + ServiceRadarWebNGWeb.Telemetry, + ServiceRadarWebNG.Repo, + {DNSCluster, + query: Application.get_env(:serviceradar_web_ng, :dns_cluster_query) || :ignore}, + {Phoenix.PubSub, name: ServiceRadarWebNG.PubSub}, + # Start a worker by calling: ServiceRadarWebNG.Worker.start_link(arg) + # 
{ServiceRadarWebNG.Worker, arg}, + # Start to serve requests, typically the last entry + ServiceRadarWebNGWeb.Endpoint + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: ServiceRadarWebNG.Supervisor] + Supervisor.start_link(children, opts) + end + + # Tell Phoenix to update the endpoint configuration + # whenever the application is updated. + @impl true + def config_change(changed, _new, removed) do + ServiceRadarWebNGWeb.Endpoint.config_change(changed, removed) + :ok + end +end diff --git a/web-ng/lib/serviceradar_web_ng/edge.ex b/web-ng/lib/serviceradar_web_ng/edge.ex new file mode 100644 index 000000000..04d1938c7 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/edge.ex @@ -0,0 +1,11 @@ +defmodule ServiceRadarWebNG.Edge do + alias ServiceRadarWebNG.Edge.OnboardingToken + + def encode_onboarding_token(package_id, download_token, core_api_url \\ nil) do + OnboardingToken.encode(package_id, download_token, core_api_url) + end + + def decode_onboarding_token(token) do + OnboardingToken.decode(token) + end +end diff --git a/web-ng/lib/serviceradar_web_ng/edge/onboarding_package.ex b/web-ng/lib/serviceradar_web_ng/edge/onboarding_package.ex new file mode 100644 index 000000000..ab5e999cb --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/edge/onboarding_package.ex @@ -0,0 +1,41 @@ +defmodule ServiceRadarWebNG.Edge.OnboardingPackage do + use Ecto.Schema + + @primary_key {:id, :binary_id, autogenerate: false, source: :package_id} + @derive {Phoenix.Param, key: :id} + schema "edge_onboarding_packages" do + field :label, :string + field :component_id, :string + field :component_type, :string + field :parent_type, :string + field :parent_id, :string + field :poller_id, :string + field :site, :string + field :status, :string + field :security_mode, :string + field :downstream_entry_id, :string + field :downstream_spiffe_id, :string + field :selectors, {:array, :string} + field :checker_kind, :string + field :checker_config_json, :map + field :join_token_ciphertext, :string + field :join_token_expires_at, :utc_datetime + field :bundle_ciphertext, :string + field :download_token_hash, :string + field :download_token_expires_at, :utc_datetime + field :created_by, :string + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + field :delivered_at, :utc_datetime + field :activated_at, :utc_datetime + field :activated_from_ip, :string + field :last_seen_spiffe_id, :string + field :revoked_at, :utc_datetime + field :deleted_at, :utc_datetime + field :deleted_by, :string + field :deleted_reason, :string + field :metadata_json, :map + field :kv_revision, :integer + field :notes, :string + end +end diff --git a/web-ng/lib/serviceradar_web_ng/edge/onboarding_token.ex b/web-ng/lib/serviceradar_web_ng/edge/onboarding_token.ex new file mode 100644 index 000000000..1ed0862dc --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/edge/onboarding_token.ex @@ -0,0 +1,82 @@ +defmodule ServiceRadarWebNG.Edge.OnboardingToken do + @moduledoc false + + @token_prefix "edgepkg-v1:" + + @type payload :: %{ + required(:pkg) => String.t(), + required(:dl) => String.t(), + optional(:api) => String.t() + } + + def encode(package_id, download_token, core_api_url \\ nil) do + payload = + %{pkg: normalize_required_string(package_id), dl: normalize_required_string(download_token)} + |> maybe_put_api(core_api_url) + + with :ok <- validate_payload(payload), + {:ok, json} <- Jason.encode(payload) do + {:ok, @token_prefix <> 
Base.url_encode64(json, padding: false)} + end + end + + def decode(raw) when is_binary(raw) do + raw = String.trim(raw) + + with true <- String.starts_with?(raw, @token_prefix), + encoded <- String.replace_prefix(raw, @token_prefix, ""), + {:ok, json} <- Base.url_decode64(encoded, padding: false), + {:ok, payload} <- Jason.decode(json), + payload <- atomize_payload(payload), + :ok <- validate_payload(payload) do + {:ok, payload} + else + false -> {:error, :unsupported_token_format} + :error -> {:error, :invalid_base64} + {:error, %Jason.DecodeError{}} -> {:error, :invalid_json} + {:error, _} = error -> error + end + end + + def decode(_), do: {:error, :unsupported_token_format} + + defp maybe_put_api(payload, nil), do: payload + + defp maybe_put_api(payload, api) when is_binary(api) do + api = String.trim(api) + if api == "", do: payload, else: Map.put(payload, :api, api) + end + + defp maybe_put_api(payload, _), do: payload + + defp atomize_payload(%{} = payload) do + %{ + pkg: Map.get(payload, "pkg", ""), + dl: Map.get(payload, "dl", ""), + api: Map.get(payload, "api") + } + |> Enum.reject(fn {_k, v} -> is_nil(v) end) + |> Map.new() + end + + defp validate_payload(%{pkg: pkg, dl: dl} = payload) do + cond do + not is_binary(pkg) or pkg == "" -> + {:error, :missing_package_id} + + not is_binary(dl) or dl == "" -> + {:error, :missing_download_token} + + Map.has_key?(payload, :api) and not is_binary(payload.api) -> + {:error, :invalid_core_api_url} + + true -> + :ok + end + end + + defp validate_payload(_), do: {:error, :invalid_payload} + + defp normalize_required_string(value) when is_binary(value), do: String.trim(value) + defp normalize_required_string(_), do: "" +end diff --git a/web-ng/lib/serviceradar_web_ng/graph.ex b/web-ng/lib/serviceradar_web_ng/graph.ex new file mode 100644 index 000000000..222d96399 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/graph.ex @@ -0,0 +1,104 @@ +defmodule ServiceRadarWebNG.Graph do + @moduledoc """ + Graph query interface for executing openCypher queries against Apache AGE. + + ServiceRadar uses the `serviceradar` AGE graph (see `docs/docs/age-graph-schema.md`). + """ + + alias ServiceRadarWebNG.Repo + import Ecto.Adapters.SQL, only: [query: 4] + + @graph_name "serviceradar" + @age_search_path ~S(ag_catalog,pg_catalog,"$user",public) + + @doc """ + Executes an openCypher query and returns the raw `Postgrex.Result`. + + Returns `{:ok, result}` or `{:error, reason}`. + """ + def cypher(cypher_query) when is_binary(cypher_query) do + Repo.transaction(fn -> + {:ok, _} = query(Repo, "LOAD 'age'", [], []) + {:ok, _} = query(Repo, "SET search_path = #{@age_search_path}", [], []) + + sql_query = """ + SELECT ag_catalog.agtype_to_text(result) as result + FROM ag_catalog.cypher('#{@graph_name}', #{dollar_quote(cypher_query)}) as (result ag_catalog.agtype) + """ + + result = query(Repo, sql_query, [], []) + + case result do + {:ok, data} -> data + {:error, error} -> Repo.rollback(error) + end + end) + end + + @doc """ + Executes an openCypher query and parses each returned row into an Elixir value. 
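+
+  Returns `{:ok, rows}` or `{:error, reason}`.
+
+  ## Examples
+
+      # Illustrative only: the vertex label used here is an assumption; the actual
+      # labels depend on the AGE graph schema (see docs/docs/age-graph-schema.md).
+      {:ok, rows} = ServiceRadarWebNG.Graph.query("MATCH (d:Device) RETURN d LIMIT 5")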
+ """ + def query(cypher_query) when is_binary(cypher_query) do + case cypher(cypher_query) do + {:ok, result} -> + parsed_rows = parse_agtype_results(result.rows) + {:ok, parsed_rows} + + {:error, error} -> + {:error, error} + end + end + + defp dollar_quote(query) do + tag = dollar_quote_tag(query) + "$#{tag}$#{query}$#{tag}$" + end + + defp dollar_quote_tag(query) do + tag = "sr_#{Base.encode16(:crypto.strong_rand_bytes(6), case: :lower)}" + + if String.contains?(query, "$#{tag}$") do + dollar_quote_tag(query) + else + tag + end + end + + defp parse_agtype_results(rows) do + Enum.map(rows, fn + [text_value] when is_binary(text_value) -> + case Jason.decode(text_value) do + {:ok, parsed} -> + parsed + + {:error, _} -> + parse_scalar(text_value) + end + + row -> + row + end) + end + + defp parse_scalar(text_value) do + cond do + text_value == "true" -> true + text_value == "false" -> false + text_value == "null" -> nil + true -> parse_number(text_value) + end + end + + defp parse_number(text_value) do + case Integer.parse(text_value) do + {int, ""} -> + int + + _ -> + case Float.parse(text_value) do + {float, ""} -> float + _ -> text_value + end + end + end +end diff --git a/web-ng/lib/serviceradar_web_ng/infrastructure.ex b/web-ng/lib/serviceradar_web_ng/infrastructure.ex new file mode 100644 index 000000000..641cf18de --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/infrastructure.ex @@ -0,0 +1,19 @@ +defmodule ServiceRadarWebNG.Infrastructure do + import Ecto.Query, only: [from: 2] + + alias ServiceRadarWebNG.Infrastructure.Poller + alias ServiceRadarWebNG.Repo + + def list_pollers(opts \\ []) do + limit = Keyword.get(opts, :limit, 200) + offset = Keyword.get(opts, :offset, 0) + + Repo.all( + from(p in Poller, + order_by: [desc: p.last_seen], + limit: ^limit, + offset: ^offset + ) + ) + end +end diff --git a/web-ng/lib/serviceradar_web_ng/infrastructure/poller.ex b/web-ng/lib/serviceradar_web_ng/infrastructure/poller.ex new file mode 100644 index 000000000..c061c4058 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/infrastructure/poller.ex @@ -0,0 +1,21 @@ +defmodule ServiceRadarWebNG.Infrastructure.Poller do + use Ecto.Schema + + @primary_key {:id, :string, autogenerate: false, source: :poller_id} + @derive {Phoenix.Param, key: :id} + schema "pollers" do + field :component_id, :string + field :registration_source, :string + field :status, :string + field :spiffe_identity, :string + field :first_registered, :utc_datetime + field :first_seen, :utc_datetime + field :last_seen, :utc_datetime + field :metadata, :map + field :created_by, :string + field :is_healthy, :boolean + field :agent_count, :integer + field :checker_count, :integer + field :updated_at, :utc_datetime + end +end diff --git a/web-ng/lib/serviceradar_web_ng/infrastructure/service.ex b/web-ng/lib/serviceradar_web_ng/infrastructure/service.ex new file mode 100644 index 000000000..624b5d4d2 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/infrastructure/service.ex @@ -0,0 +1,15 @@ +defmodule ServiceRadarWebNG.Infrastructure.Service do + use Ecto.Schema + + @primary_key false + schema "services" do + field :timestamp, :utc_datetime + field :poller_id, :string + field :agent_id, :string + field :service_name, :string + field :service_type, :string + field :config, :map + field :partition, :string + field :created_at, :utc_datetime + end +end diff --git a/web-ng/lib/serviceradar_web_ng/inventory.ex b/web-ng/lib/serviceradar_web_ng/inventory.ex new file mode 100644 index 000000000..bb326f70c --- /dev/null +++ 
b/web-ng/lib/serviceradar_web_ng/inventory.ex @@ -0,0 +1,19 @@ +defmodule ServiceRadarWebNG.Inventory do + import Ecto.Query, only: [from: 2] + + alias ServiceRadarWebNG.Inventory.Device + alias ServiceRadarWebNG.Repo + + def list_devices(opts \\ []) do + limit = Keyword.get(opts, :limit, 100) + offset = Keyword.get(opts, :offset, 0) + + Repo.all( + from(d in Device, + order_by: [desc: d.last_seen], + limit: ^limit, + offset: ^offset + ) + ) + end +end diff --git a/web-ng/lib/serviceradar_web_ng/inventory/device.ex b/web-ng/lib/serviceradar_web_ng/inventory/device.ex new file mode 100644 index 000000000..6797a8977 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/inventory/device.ex @@ -0,0 +1,25 @@ +defmodule ServiceRadarWebNG.Inventory.Device do + use Ecto.Schema + + @primary_key {:id, :string, autogenerate: false, source: :device_id} + @derive {Phoenix.Param, key: :id} + schema "unified_devices" do + field :ip, :string + field :poller_id, :string + field :agent_id, :string + field :hostname, :string + field :mac, :string + field :discovery_sources, {:array, :string} + field :is_available, :boolean + field :first_seen, :utc_datetime + field :last_seen, :utc_datetime + field :metadata, :map + field :device_type, :string + field :service_type, :string + field :service_status, :string + field :last_heartbeat, :utc_datetime + field :os_info, :string + field :version_info, :string + field :updated_at, :utc_datetime + end +end diff --git a/web-ng/lib/serviceradar_web_ng/mailer.ex b/web-ng/lib/serviceradar_web_ng/mailer.ex new file mode 100644 index 000000000..9c981c5ac --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/mailer.ex @@ -0,0 +1,3 @@ +defmodule ServiceRadarWebNG.Mailer do + use Swoosh.Mailer, otp_app: :serviceradar_web_ng +end diff --git a/web-ng/lib/serviceradar_web_ng/repo.ex b/web-ng/lib/serviceradar_web_ng/repo.ex new file mode 100644 index 000000000..43abb80dd --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/repo.ex @@ -0,0 +1,5 @@ +defmodule ServiceRadarWebNG.Repo do + use Ecto.Repo, + otp_app: :serviceradar_web_ng, + adapter: Ecto.Adapters.Postgres +end diff --git a/web-ng/lib/serviceradar_web_ng/srql.ex b/web-ng/lib/serviceradar_web_ng/srql.ex new file mode 100644 index 000000000..949c69cc5 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/srql.ex @@ -0,0 +1,219 @@ +defmodule ServiceRadarWebNG.SRQL do + @moduledoc false + + alias ServiceRadarWebNG.Repo + alias ServiceRadarWebNG.SRQL.Native + + @behaviour ServiceRadarWebNG.SRQLBehaviour + + def query(query, opts \\ %{}) when is_binary(query) do + query_request(%{ + "query" => query, + "limit" => Map.get(opts, :limit), + "cursor" => Map.get(opts, :cursor), + "direction" => Map.get(opts, :direction), + "mode" => Map.get(opts, :mode) + }) + end + + def query_request(%{} = request) do + with {:ok, query, limit, cursor, direction, mode} <- normalize_request(request), + {:ok, translation} <- translate(query, limit, cursor, direction, mode), + {:ok, response} <- execute_translation(translation) do + {:ok, response} + else + {:error, reason} -> {:error, reason} + end + end + + defp translate(query, limit, cursor, direction, mode) do + case Native.translate(query, limit, cursor, direction, mode) do + {:ok, json} when is_binary(json) -> + case Jason.decode(json) do + {:ok, decoded} when is_map(decoded) -> {:ok, decoded} + {:error, reason} -> {:error, reason} + end + + {:error, reason} -> + {:error, reason} + + other -> + {:error, {:unexpected_srql_translate_result, other}} + end + end + + defp execute_translation(%{"sql" => sql} = 
translation) when is_binary(sql) do + params = + translation + |> Map.get("params", []) + |> decode_params() + + with {:ok, params} <- params, + {:ok, result} <- Ecto.Adapters.SQL.query(Repo, sql, params) do + {:ok, build_response(translation, result)} + else + {:error, reason} -> {:error, reason} + end + end + + defp execute_translation(_translation) do + {:error, :invalid_srql_translation} + end + + defp build_response(translation, %Postgrex.Result{columns: columns, rows: rows}) do + results = + case columns do + [single] when is_binary(single) -> + Enum.map(rows, fn + [value] -> normalize_value(value) + other -> normalize_value(other) + end) + + _ -> + Enum.map(rows, fn row -> + columns + |> Enum.zip(row) + |> Map.new(fn {col, val} -> {col, normalize_value(val)} end) + end) + end + + viz = + case Map.get(translation, "viz") do + value when is_map(value) -> value + _ -> nil + end + + limit = + translation + |> get_in(["pagination", "limit"]) + |> case do + value when is_integer(value) -> value + _ -> nil + end + + prev_cursor = get_in(translation, ["pagination", "prev_cursor"]) + next_cursor_candidate = get_in(translation, ["pagination", "next_cursor"]) + + next_cursor = + cond do + is_integer(limit) and is_binary(next_cursor_candidate) and length(results) >= limit -> + next_cursor_candidate + + true -> + nil + end + + %{ + "results" => results, + "pagination" => %{ + "next_cursor" => next_cursor, + "prev_cursor" => prev_cursor, + "limit" => limit + }, + "viz" => viz, + "error" => nil + } + end + + defp normalize_value(%DateTime{} = value), do: DateTime.to_iso8601(value) + defp normalize_value(%NaiveDateTime{} = value), do: NaiveDateTime.to_iso8601(value) + defp normalize_value(%Date{} = value), do: Date.to_iso8601(value) + defp normalize_value(%Time{} = value), do: Time.to_iso8601(value) + defp normalize_value(%Decimal{} = value), do: Decimal.to_string(value) + defp normalize_value(value), do: value + + defp decode_params(params) when is_list(params) do + params + |> Enum.reduce_while({:ok, []}, fn param, {:ok, acc} -> + case decode_param(param) do + {:ok, decoded} -> {:cont, {:ok, [decoded | acc]}} + {:error, reason} -> {:halt, {:error, reason}} + end + end) + |> case do + {:ok, decoded} -> {:ok, Enum.reverse(decoded)} + {:error, reason} -> {:error, reason} + end + end + + defp decode_params(_), do: {:error, :invalid_srql_params} + + defp decode_param(%{"t" => "text", "v" => value}) when is_binary(value), do: {:ok, value} + defp decode_param(%{"t" => "bool", "v" => value}) when is_boolean(value), do: {:ok, value} + defp decode_param(%{"t" => "int", "v" => value}) when is_integer(value), do: {:ok, value} + + defp decode_param(%{"t" => "int_array", "v" => values}) when is_list(values) do + if Enum.all?(values, &is_integer/1) do + {:ok, values} + else + {:error, :invalid_int_array_param} + end + end + + defp decode_param(%{"t" => "float", "v" => value}) when is_float(value), do: {:ok, value} + defp decode_param(%{"t" => "float", "v" => value}) when is_integer(value), do: {:ok, value / 1} + + defp decode_param(%{"t" => "text_array", "v" => values}) when is_list(values) do + if Enum.all?(values, &is_binary/1) do + {:ok, values} + else + {:error, :invalid_text_array_param} + end + end + + defp decode_param(%{"t" => "timestamptz", "v" => value}) when is_binary(value) do + case DateTime.from_iso8601(value) do + {:ok, datetime, _offset} -> {:ok, datetime} + _ -> {:error, :invalid_timestamptz_param} + end + end + + defp decode_param(_), do: {:error, :invalid_srql_param} + + defp 
normalize_request(%{"query" => query} = request) when is_binary(query) do + limit = parse_limit(Map.get(request, "limit")) + cursor = normalize_optional_string(Map.get(request, "cursor")) + direction = normalize_direction(Map.get(request, "direction")) + mode = normalize_optional_string(Map.get(request, "mode")) + {:ok, query, limit, cursor, direction, mode} + end + + defp normalize_request(%{query: query} = request) when is_binary(query) do + limit = parse_limit(Map.get(request, :limit)) + cursor = normalize_optional_string(Map.get(request, :cursor)) + direction = normalize_direction(Map.get(request, :direction)) + mode = normalize_optional_string(Map.get(request, :mode)) + {:ok, query, limit, cursor, direction, mode} + end + + defp normalize_request(_request) do + {:error, "missing required field: query"} + end + + defp parse_limit(nil), do: nil + defp parse_limit(limit) when is_integer(limit), do: limit + + defp parse_limit(limit) when is_binary(limit) do + case Integer.parse(limit) do + {value, ""} -> value + _ -> nil + end + end + + defp normalize_optional_string(nil), do: nil + defp normalize_optional_string(""), do: nil + defp normalize_optional_string(value) when is_binary(value), do: value + defp normalize_optional_string(value), do: to_string(value) + + defp normalize_direction(nil), do: nil + + defp normalize_direction(direction) when direction in ["next", "prev"] do + direction + end + + defp normalize_direction(direction) when direction in [:next, :prev] do + Atom.to_string(direction) + end + + defp normalize_direction(_direction), do: nil +end diff --git a/web-ng/lib/serviceradar_web_ng/srql/behaviour.ex b/web-ng/lib/serviceradar_web_ng/srql/behaviour.ex new file mode 100644 index 000000000..5df178533 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/srql/behaviour.ex @@ -0,0 +1,7 @@ +defmodule ServiceRadarWebNG.SRQLBehaviour do + @moduledoc false + + @type srql_response :: map() + + @callback query_request(map()) :: {:ok, srql_response} | {:error, term()} +end diff --git a/web-ng/lib/serviceradar_web_ng/srql/native.ex b/web-ng/lib/serviceradar_web_ng/srql/native.ex new file mode 100644 index 000000000..042ffd8e3 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng/srql/native.ex @@ -0,0 +1,8 @@ +defmodule ServiceRadarWebNG.SRQL.Native do + @moduledoc false + + use Rustler, otp_app: :serviceradar_web_ng, crate: "srql_nif" + + def translate(_query, _limit, _cursor, _direction, _mode), + do: :erlang.nif_error(:nif_not_loaded) +end diff --git a/web-ng/lib/serviceradar_web_ng_web.ex b/web-ng/lib/serviceradar_web_ng_web.ex new file mode 100644 index 000000000..afd8eeede --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web.ex @@ -0,0 +1,116 @@ +defmodule ServiceRadarWebNGWeb do + @moduledoc """ + The entrypoint for defining your web interface, such + as controllers, components, channels, and so on. + + This can be used in your application as: + + use ServiceRadarWebNGWeb, :controller + use ServiceRadarWebNGWeb, :html + + The definitions below will be executed for every controller, + component, etc, so keep them short and clean, focused + on imports, uses and aliases. + + Do NOT define functions inside the quoted expressions + below. Instead, define additional modules and import + those modules here. 
+ """ + + def static_paths, do: ~w(assets fonts images favicon.ico robots.txt) + + def router do + quote do + use Phoenix.Router, helpers: false + + # Import common connection and controller functions to use in pipelines + import Plug.Conn + import Phoenix.Controller + import Phoenix.LiveView.Router + end + end + + def channel do + quote do + use Phoenix.Channel + end + end + + def controller do + quote do + use Phoenix.Controller, formats: [:html, :json] + + use Gettext, backend: ServiceRadarWebNGWeb.Gettext + + import Plug.Conn + + unquote(verified_routes()) + end + end + + def live_view do + quote do + use Phoenix.LiveView + + unquote(html_helpers()) + end + end + + def live_component do + quote do + use Phoenix.LiveComponent + + unquote(html_helpers()) + end + end + + def html do + quote do + use Phoenix.Component + + # Import convenience functions from controllers + import Phoenix.Controller, + only: [get_csrf_token: 0, view_module: 1, view_template: 1] + + # Include general helpers for rendering HTML + unquote(html_helpers()) + end + end + + defp html_helpers do + quote do + # Translation + use Gettext, backend: ServiceRadarWebNGWeb.Gettext + + # HTML escaping functionality + import Phoenix.HTML + # Core UI components + import ServiceRadarWebNGWeb.CoreComponents + import ServiceRadarWebNGWeb.UIComponents + import ServiceRadarWebNGWeb.SRQLComponents + + # Common modules used in templates + alias Phoenix.LiveView.JS + alias ServiceRadarWebNGWeb.Layouts + + # Routes generation with the ~p sigil + unquote(verified_routes()) + end + end + + def verified_routes do + quote do + use Phoenix.VerifiedRoutes, + endpoint: ServiceRadarWebNGWeb.Endpoint, + router: ServiceRadarWebNGWeb.Router, + statics: ServiceRadarWebNGWeb.static_paths() + end + end + + @doc """ + When used, dispatch to the appropriate controller/live_view/etc. + """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/components/core_components.ex b/web-ng/lib/serviceradar_web_ng_web/components/core_components.ex new file mode 100644 index 000000000..c10e43e70 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/components/core_components.ex @@ -0,0 +1,498 @@ +defmodule ServiceRadarWebNGWeb.CoreComponents do + @moduledoc """ + Provides core UI components. + + At first glance, this module may seem daunting, but its goal is to provide + core building blocks for your application, such as tables, forms, and + inputs. The components consist mostly of markup and are well-documented + with doc strings and declarative assigns. You may customize and style + them in any way you want, based on your application growth and needs. + + The foundation for styling is Tailwind CSS, a utility-first CSS framework, + augmented with daisyUI, a Tailwind CSS plugin that provides UI components + and themes. Here are useful references: + + * [daisyUI](https://daisyui.com/docs/intro/) - a good place to get + started and see the available components. + + * [Tailwind CSS](https://tailwindcss.com) - the foundational framework + we build on. You will use it for layout, sizing, flexbox, grid, and + spacing. + + * [Heroicons](https://heroicons.com) - see `icon/1` for usage. + + * [Phoenix.Component](https://hexdocs.pm/phoenix_live_view/Phoenix.Component.html) - + the component system used by Phoenix. Some components, such as `<.link>` + and `<.form>`, are defined there. 
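+
+  These components are imported automatically wherever `use ServiceRadarWebNGWeb, :html`
+  (or `:live_view` / `:live_component`) is invoked, so they can be used directly in HEEx
+  templates, for example `<.icon name="hero-x-mark" />`.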
+ + """ + use Phoenix.Component + use Gettext, backend: ServiceRadarWebNGWeb.Gettext + + alias Phoenix.LiveView.JS + + @doc """ + Renders flash notices. + + ## Examples + + <.flash kind={:info} flash={@flash} /> + <.flash kind={:info} phx-mounted={show("#flash")}>Welcome Back!</.flash> + """ + attr :id, :string, doc: "the optional id of flash container" + attr :flash, :map, default: %{}, doc: "the map of flash messages to display" + attr :title, :string, default: nil + attr :kind, :atom, values: [:info, :error], doc: "used for styling and flash lookup" + attr :rest, :global, doc: "the arbitrary HTML attributes to add to the flash container" + + slot :inner_block, doc: "the optional inner block that renders the flash message" + + def flash(assigns) do + assigns = assign_new(assigns, :id, fn -> "flash-#{assigns.kind}" end) + + ~H""" + <div + :if={msg = render_slot(@inner_block) || Phoenix.Flash.get(@flash, @kind)} + id={@id} + phx-click={JS.push("lv:clear-flash", value: %{key: @kind}) |> hide("##{@id}")} + role="alert" + class="toast toast-top toast-end z-50" + {@rest} + > + <div class={[ + "alert w-80 sm:w-96 max-w-80 sm:max-w-96 text-wrap", + @kind == :info && "alert-info", + @kind == :error && "alert-error" + ]}> + <.icon :if={@kind == :info} name="hero-information-circle" class="size-5 shrink-0" /> + <.icon :if={@kind == :error} name="hero-exclamation-circle" class="size-5 shrink-0" /> + <div> + <p :if={@title} class="font-semibold">{@title}</p> + <p>{msg}</p> + </div> + <div class="flex-1" /> + <button type="button" class="group self-start cursor-pointer" aria-label={gettext("close")}> + <.icon name="hero-x-mark" class="size-5 opacity-40 group-hover:opacity-70" /> + </button> + </div> + </div> + """ + end + + @doc """ + Renders a button with navigation support. + + ## Examples + + <.button>Send!</.button> + <.button phx-click="go" variant="primary">Send!</.button> + <.button navigate={~p"/"}>Home</.button> + """ + attr :rest, :global, include: ~w(href navigate patch method download name value disabled) + attr :class, :any + attr :variant, :string, values: ~w(primary) + slot :inner_block, required: true + + def button(%{rest: rest} = assigns) do + variants = %{"primary" => "btn-primary", nil => "btn-primary btn-soft"} + + assigns = + assign_new(assigns, :class, fn -> + ["btn", Map.fetch!(variants, assigns[:variant])] + end) + + if rest[:href] || rest[:navigate] || rest[:patch] do + ~H""" + <.link class={@class} {@rest}> + {render_slot(@inner_block)} + </.link> + """ + else + ~H""" + <button class={@class} {@rest}> + {render_slot(@inner_block)} + </button> + """ + end + end + + @doc """ + Renders an input with label and error messages. + + A `Phoenix.HTML.FormField` may be passed as argument, + which is used to retrieve the input name, id, and values. + Otherwise all attributes may be passed explicitly. + + ## Types + + This function accepts all HTML input types, considering that: + + * You may also set `type="select"` to render a `<select>` tag + + * `type="checkbox"` is used exclusively to render boolean values + + * For live file uploads, see `Phoenix.Component.live_file_input/1` + + See https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input + for more information. Unsupported types, such as radio, are best + written directly in your templates. 
+ + ## Examples + + ```heex + <.input field={@form[:email]} type="email" /> + <.input name="my-input" errors={["oh no!"]} /> + ``` + + ## Select type + + When using `type="select"`, you must pass the `options` and optionally + a `value` to mark which option should be preselected. + + ```heex + <.input field={@form[:user_type]} type="select" options={["Admin": "admin", "User": "user"]} /> + ``` + + For more information on what kind of data can be passed to `options` see + [`options_for_select`](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html#options_for_select/2). + """ + attr :id, :any, default: nil + attr :name, :any + attr :label, :string, default: nil + attr :value, :any + + attr :type, :string, + default: "text", + values: ~w(checkbox color date datetime-local email file month number password + search select tel text textarea time url week hidden) + + attr :field, Phoenix.HTML.FormField, + doc: "a form field struct retrieved from the form, for example: @form[:email]" + + attr :errors, :list, default: [] + attr :checked, :boolean, doc: "the checked flag for checkbox inputs" + attr :prompt, :string, default: nil, doc: "the prompt for select inputs" + attr :options, :list, doc: "the options to pass to Phoenix.HTML.Form.options_for_select/2" + attr :multiple, :boolean, default: false, doc: "the multiple flag for select inputs" + attr :class, :any, default: nil, doc: "the input class to use over defaults" + attr :error_class, :any, default: nil, doc: "the input error class to use over defaults" + + attr :rest, :global, + include: ~w(accept autocomplete capture cols disabled form list max maxlength min minlength + multiple pattern placeholder readonly required rows size step) + + def input(%{field: %Phoenix.HTML.FormField{} = field} = assigns) do + errors = if Phoenix.Component.used_input?(field), do: field.errors, else: [] + + assigns + |> assign(field: nil, id: assigns.id || field.id) + |> assign(:errors, Enum.map(errors, &translate_error(&1))) + |> assign_new(:name, fn -> if assigns.multiple, do: field.name <> "[]", else: field.name end) + |> assign_new(:value, fn -> field.value end) + |> input() + end + + def input(%{type: "hidden"} = assigns) do + ~H""" + <input type="hidden" id={@id} name={@name} value={@value} {@rest} /> + """ + end + + def input(%{type: "checkbox"} = assigns) do + assigns = + assign_new(assigns, :checked, fn -> + Phoenix.HTML.Form.normalize_value("checkbox", assigns[:value]) + end) + + ~H""" + <div class="fieldset mb-2"> + <label> + <input + type="hidden" + name={@name} + value="false" + disabled={@rest[:disabled]} + form={@rest[:form]} + /> + <span class="label"> + <input + type="checkbox" + id={@id} + name={@name} + value="true" + checked={@checked} + class={@class || "checkbox checkbox-sm"} + {@rest} + />{@label} + </span> + </label> + <.error :for={msg <- @errors}>{msg}</.error> + </div> + """ + end + + def input(%{type: "select"} = assigns) do + ~H""" + <div class="fieldset mb-2"> + <label> + <span :if={@label} class="label mb-1">{@label}</span> + <select + id={@id} + name={@name} + class={[@class || "w-full select", @errors != [] && (@error_class || "select-error")]} + multiple={@multiple} + {@rest} + > + <option :if={@prompt} value="">{@prompt}</option> + {Phoenix.HTML.Form.options_for_select(@options, @value)} + </select> + </label> + <.error :for={msg <- @errors}>{msg}</.error> + </div> + """ + end + + def input(%{type: "textarea"} = assigns) do + ~H""" + <div class="fieldset mb-2"> + <label> + <span :if={@label} class="label mb-1">{@label}</span> + 
<textarea + id={@id} + name={@name} + class={[ + @class || "w-full textarea", + @errors != [] && (@error_class || "textarea-error") + ]} + {@rest} + >{Phoenix.HTML.Form.normalize_value("textarea", @value)}</textarea> + </label> + <.error :for={msg <- @errors}>{msg}</.error> + </div> + """ + end + + # All other inputs text, datetime-local, url, password, etc. are handled here... + def input(assigns) do + ~H""" + <div class="fieldset mb-2"> + <label> + <span :if={@label} class="label mb-1">{@label}</span> + <input + type={@type} + name={@name} + id={@id} + value={Phoenix.HTML.Form.normalize_value(@type, @value)} + class={[ + @class || "w-full input", + @errors != [] && (@error_class || "input-error") + ]} + {@rest} + /> + </label> + <.error :for={msg <- @errors}>{msg}</.error> + </div> + """ + end + + # Helper used by inputs to generate form errors + defp error(assigns) do + ~H""" + <p class="mt-1.5 flex gap-2 items-center text-sm text-error"> + <.icon name="hero-exclamation-circle" class="size-5" /> + {render_slot(@inner_block)} + </p> + """ + end + + @doc """ + Renders a header with title. + """ + slot :inner_block, required: true + slot :subtitle + slot :actions + + def header(assigns) do + ~H""" + <header class={[@actions != [] && "flex items-center justify-between gap-6", "pb-4"]}> + <div> + <h1 class="text-lg font-semibold leading-8"> + {render_slot(@inner_block)} + </h1> + <p :if={@subtitle != []} class="text-sm text-base-content/70"> + {render_slot(@subtitle)} + </p> + </div> + <div class="flex-none">{render_slot(@actions)}</div> + </header> + """ + end + + @doc """ + Renders a table with generic styling. + + ## Examples + + <.table id="users" rows={@users}> + <:col :let={user} label="id">{user.id}</:col> + <:col :let={user} label="username">{user.username}</:col> + </.table> + """ + attr :id, :string, required: true + attr :rows, :list, required: true + attr :row_id, :any, default: nil, doc: "the function for generating the row id" + attr :row_click, :any, default: nil, doc: "the function for handling phx-click on each row" + + attr :row_item, :any, + default: &Function.identity/1, + doc: "the function for mapping each row before calling the :col and :action slots" + + slot :col, required: true do + attr :label, :string + end + + slot :action, doc: "the slot for showing user actions in the last table column" + + def table(assigns) do + assigns = + with %{rows: %Phoenix.LiveView.LiveStream{}} <- assigns do + assign(assigns, row_id: assigns.row_id || fn {id, _item} -> id end) + end + + ~H""" + <table class="table table-zebra"> + <thead> + <tr> + <th :for={col <- @col}>{col[:label]}</th> + <th :if={@action != []}> + <span class="sr-only">{gettext("Actions")}</span> + </th> + </tr> + </thead> + <tbody id={@id} phx-update={is_struct(@rows, Phoenix.LiveView.LiveStream) && "stream"}> + <tr :for={row <- @rows} id={@row_id && @row_id.(row)}> + <td + :for={col <- @col} + phx-click={@row_click && @row_click.(row)} + class={@row_click && "hover:cursor-pointer"} + > + {render_slot(col, @row_item.(row))} + </td> + <td :if={@action != []} class="w-0 font-semibold"> + <div class="flex gap-4"> + <%= for action <- @action do %> + {render_slot(action, @row_item.(row))} + <% end %> + </div> + </td> + </tr> + </tbody> + </table> + """ + end + + @doc """ + Renders a data list. 
+ + ## Examples + + <.list> + <:item title="Title">{@post.title}</:item> + <:item title="Views">{@post.views}</:item> + </.list> + """ + slot :item, required: true do + attr :title, :string, required: true + end + + def list(assigns) do + ~H""" + <ul class="list"> + <li :for={item <- @item} class="list-row"> + <div class="list-col-grow"> + <div class="font-bold">{item.title}</div> + <div>{render_slot(item)}</div> + </div> + </li> + </ul> + """ + end + + @doc """ + Renders a [Heroicon](https://heroicons.com). + + Heroicons come in three styles – outline, solid, and mini. + By default, the outline style is used, but solid and mini may + be applied by using the `-solid` and `-mini` suffix. + + You can customize the size and colors of the icons by setting + width, height, and background color classes. + + Icons are extracted from the `deps/heroicons` directory and bundled within + your compiled app.css by the plugin in `assets/vendor/heroicons.js`. + + ## Examples + + <.icon name="hero-x-mark" /> + <.icon name="hero-arrow-path" class="ml-1 size-3 motion-safe:animate-spin" /> + """ + attr :name, :string, required: true + attr :class, :any, default: "size-4" + + def icon(%{name: "hero-" <> _} = assigns) do + ~H""" + <span class={[@name, @class]} /> + """ + end + + ## JS Commands + + def show(js \\ %JS{}, selector) do + JS.show(js, + to: selector, + time: 300, + transition: + {"transition-all ease-out duration-300", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95", + "opacity-100 translate-y-0 sm:scale-100"} + ) + end + + def hide(js \\ %JS{}, selector) do + JS.hide(js, + to: selector, + time: 200, + transition: + {"transition-all ease-in duration-200", "opacity-100 translate-y-0 sm:scale-100", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95"} + ) + end + + @doc """ + Translates an error message using gettext. + """ + def translate_error({msg, opts}) do + # When using gettext, we typically pass the strings we want + # to translate as a static argument: + # + # # Translate the number of files with plural rules + # dngettext("errors", "1 file", "%{count} files", count) + # + # However the error messages in our forms and APIs are generated + # dynamically, so we need to translate them by calling Gettext + # with our gettext backend as first argument. Translations are + # available in the errors.po file (as we use the "errors" domain). + if count = opts[:count] do + Gettext.dngettext(ServiceRadarWebNGWeb.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(ServiceRadarWebNGWeb.Gettext, "errors", msg, opts) + end + end + + @doc """ + Translates the errors for a field from a keyword list of errors. + """ + def translate_errors(errors, field) when is_list(errors) do + for {^field, {msg, opts}} <- errors, do: translate_error({msg, opts}) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/components/layouts.ex b/web-ng/lib/serviceradar_web_ng_web/components/layouts.ex new file mode 100644 index 000000000..d82b4a0a2 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/components/layouts.ex @@ -0,0 +1,436 @@ +defmodule ServiceRadarWebNGWeb.Layouts do + @moduledoc """ + This module holds layouts and related functionality + used by your application. + """ + use ServiceRadarWebNGWeb, :html + + # Embed all files in layouts/* within this module. + # The default root.html.heex file contains the HTML + # skeleton of your application, namely HTML headers + # and other static content. + embed_templates "layouts/*" + + @doc """ + Renders your app layout. 
+ + This function is typically invoked from every template, + and it often contains your application menu, sidebar, + or similar. + + ## Examples + + <Layouts.app flash={@flash}> + <h1>Content</h1> + </Layouts.app> + + """ + attr :flash, :map, required: true, doc: "the map of flash messages" + + attr :current_scope, :map, + default: nil, + doc: "the current [scope](https://hexdocs.pm/phoenix/scopes.html)" + + attr :srql, :map, default: %{}, doc: "SRQL query bar state for SRQL-driven pages" + + slot :inner_block, required: true + + def app(assigns) do + assigns = assign_new(assigns, :srql, fn -> %{} end) + current_scope = assigns[:current_scope] + signed_in? = is_map(current_scope) and not is_nil(Map.get(current_scope, :user)) + current_path = Map.get(assigns.srql, :page_path) + assigns = assign(assigns, signed_in?: signed_in?, current_path: current_path) + + ~H""" + <div class="drawer lg:drawer-open"> + <input id="sr-sidebar" type="checkbox" class="drawer-toggle" /> + + <div class="drawer-content flex min-h-screen flex-col"> + <header class="sticky top-0 z-20 border-b border-base-200 bg-base-100/90 backdrop-blur"> + <div class="px-4 sm:px-6 lg:px-8 py-3 flex flex-col gap-2"> + <%!-- Top row: hamburger, SRQL bar, and auth buttons --%> + <div class="flex items-center gap-3"> + <label + :if={@signed_in?} + for="sr-sidebar" + class="btn btn-ghost btn-sm lg:hidden shrink-0" + aria-label="Open navigation" + title="Open navigation" + > + <.icon name="hero-bars-3" class="size-5" /> + </label> + + <div :if={Map.get(@srql, :enabled, false)} class="flex-1 min-w-0"> + <.srql_query_bar + query={Map.get(@srql, :query)} + draft={Map.get(@srql, :draft)} + loading={Map.get(@srql, :loading, false)} + builder_available={Map.get(@srql, :builder_available, false)} + builder_open={Map.get(@srql, :builder_open, false)} + builder_supported={Map.get(@srql, :builder_supported, true)} + builder_sync={Map.get(@srql, :builder_sync, true)} + builder={Map.get(@srql, :builder, %{})} + /> + </div> + <div :if={not Map.get(@srql, :enabled, false)} class="flex-1"></div> + + <div class="flex items-center gap-2 shrink-0"> + <.theme_toggle :if={not @signed_in?} /> + + <%= if not @signed_in? 
do %> + <.ui_button href={~p"/users/register"} variant="ghost" size="sm">Register</.ui_button> + <.ui_button href={~p"/users/log-in"} variant="primary" size="sm">Log in</.ui_button> + <% end %> + </div> + </div> + + <%!-- Second row: breadcrumb navigation (all on one line) --%> + <.breadcrumb_nav :if={@current_path} current_path={@current_path} /> + </div> + </header> + + <div + :if={Map.get(@srql, :builder_open, false) or Map.get(@srql, :error)} + class="border-b border-base-200 bg-base-100" + > + <div class="px-4 sm:px-6 lg:px-8 py-4"> + <div :if={Map.get(@srql, :error)} class="mb-3 text-xs text-error"> + {Map.get(@srql, :error)} + </div> + + <.srql_query_builder + :if={Map.get(@srql, :builder_open, false)} + supported={Map.get(@srql, :builder_supported, true)} + sync={Map.get(@srql, :builder_sync, true)} + builder={Map.get(@srql, :builder, %{})} + /> + </div> + </div> + + <main class="px-4 py-6 sm:px-6 lg:px-8 flex-1"> + {render_slot(@inner_block)} + </main> + + <.flash_group flash={@flash} /> + </div> + + <div :if={@signed_in?} class="drawer-side z-30"> + <label for="sr-sidebar" class="drawer-overlay" aria-label="Close navigation"></label> + <aside class="w-48 bg-base-100 border-r border-base-200 min-h-full flex flex-col"> + <div class="p-3"> + <.link href={~p"/"} class="flex items-center gap-2 mb-4"> + <img + src={~p"/images/logo.svg"} + alt="ServiceRadar" + class="size-6 opacity-95" + width="24" + height="24" + /> + <span class="font-semibold text-sm tracking-tight">ServiceRadar</span> + </.link> + + <ul class="menu menu-sm"> + <li> + <.sidebar_link + href={~p"/analytics"} + label="Analytics" + icon="hero-chart-bar" + active={@current_path == "/analytics"} + /> + </li> + <li> + <.sidebar_link + href={~p"/devices"} + label="Devices" + icon="hero-server" + active={@current_path == "/devices"} + /> + </li> + <li> + <.sidebar_link + href={~p"/services"} + label="Services" + icon="hero-cog-6-tooth" + active={@current_path in ["/services", "/pollers"]} + /> + </li> + <li> + <.sidebar_link + href={~p"/interfaces"} + label="Interfaces" + icon="hero-globe-alt" + active={@current_path == "/interfaces"} + /> + </li> + <li> + <.sidebar_link + href={~p"/events"} + label="Events" + icon="hero-bell-alert" + active={@current_path == "/events"} + /> + </li> + <li> + <.sidebar_link + href={~p"/observability"} + label="Observability" + icon="hero-presentation-chart-line" + active={@current_path in ["/observability", "/logs"]} + /> + </li> + </ul> + </div> + + <div class="mt-auto p-3 border-t border-base-200"> + <div class="dropdown dropdown-top w-full"> + <div + tabindex="0" + role="button" + class="flex items-center gap-2 p-2 rounded-lg hover:bg-base-200 cursor-pointer w-full" + > + <div class="avatar avatar-placeholder"> + <div class="bg-neutral text-neutral-content w-8 rounded-full"> + <span class="text-xs">{user_initials(@current_scope.user.email)}</span> + </div> + </div> + <.icon name="hero-chevron-up" class="size-3 text-base-content/50 ml-auto" /> + </div> + <ul + tabindex="0" + class="dropdown-content menu bg-base-200 rounded-box z-10 w-56 p-2 shadow-lg mb-2" + > + <li> + <div class="flex flex-col gap-2"> + <span class="text-[10px] uppercase tracking-wider text-base-content/60"> + Theme + </span> + <.theme_toggle /> + </div> + </li> + <li> + <.link href={~p"/users/settings"} class="text-sm"> + <.icon name="hero-cog-6-tooth" class="size-4" /> Settings + </.link> + </li> + <li> + <.link href={~p"/users/log-out"} method="delete" class="text-sm"> + <.icon name="hero-arrow-right-on-rectangle" 
class="size-4" /> Log out + </.link> + </li> + </ul> + </div> + </div> + </aside> + </div> + </div> + """ + end + + defp user_initials(email) when is_binary(email) do + email + |> String.split("@") + |> List.first() + |> String.slice(0, 2) + |> String.upcase() + end + + defp user_initials(_), do: "?" + + attr :href, :string, required: true + attr :label, :string, required: true + attr :icon, :string, default: nil + attr :active, :boolean, default: false + + def sidebar_link(assigns) do + ~H""" + <.link + href={@href} + aria-current={@active && "page"} + class={[ + "flex items-center gap-2", + @active && "active" + ]} + > + <.icon :if={@icon} name={@icon} class="size-4 opacity-80" /> + <span class="truncate">{@label}</span> + </.link> + """ + end + + attr :current_path, :string, required: true + + defp breadcrumb_nav(assigns) do + crumbs = build_breadcrumbs(assigns.current_path) + assigns = assign(assigns, :crumbs, crumbs) + + ~H""" + <nav class="text-xs sm:text-sm"> + <div class="breadcrumbs"> + <ul class="flex items-center flex-wrap min-w-0"> + <li> + <.link + href={~p"/analytics"} + class="flex items-center gap-1.5 text-base-content/60 hover:text-base-content" + title="Home" + > + <.icon name="hero-home-micro" class="size-3.5" /> + </.link> + </li> + <li :for={crumb <- @crumbs}> + <.link + :if={crumb.href != nil} + href={crumb.href} + class="flex items-center gap-1.5 text-base-content/60 hover:text-base-content" + title={crumb.label} + > + <.icon :if={crumb.icon} name={crumb.icon} class="size-3.5 shrink-0" /> + <span>{crumb.label}</span> + </.link> + <span + :if={crumb.href == nil} + class="flex items-center gap-1.5 font-medium text-base-content truncate max-w-[20rem]" + title={crumb.label} + > + {crumb.label} + </span> + </li> + </ul> + </div> + </nav> + """ + end + + defp build_breadcrumbs(path) when is_binary(path) do + segments = + path + |> String.trim_leading("/") + |> String.split("/") + |> Enum.reject(&(&1 == "")) + + case segments do + [] -> + [] + + [section] -> + [%{label: section_label(section), icon: section_icon(section), href: nil}] + + [section, id] -> + [ + %{label: section_label(section), icon: section_icon(section), href: "/#{section}"}, + %{label: format_id(id), icon: nil, href: nil} + ] + + [section, id | _rest] -> + [ + %{label: section_label(section), icon: section_icon(section), href: "/#{section}"}, + %{label: format_id(id), icon: nil, href: nil} + ] + end + end + + defp build_breadcrumbs(_), do: [] + + defp section_label("analytics"), do: "Analytics" + defp section_label("devices"), do: "Devices" + defp section_label("pollers"), do: "Pollers" + defp section_label("events"), do: "Events" + defp section_label("logs"), do: "Logs" + defp section_label("observability"), do: "Observability" + defp section_label("services"), do: "Services" + defp section_label("interfaces"), do: "Interfaces" + defp section_label(other), do: String.capitalize(other) + + defp section_icon("analytics"), do: "hero-chart-bar-micro" + defp section_icon("devices"), do: "hero-server-micro" + defp section_icon("pollers"), do: "hero-cog-6-tooth-micro" + defp section_icon("events"), do: "hero-bell-alert-micro" + defp section_icon("logs"), do: "hero-presentation-chart-line-micro" + defp section_icon("observability"), do: "hero-presentation-chart-line-micro" + defp section_icon("services"), do: "hero-cog-6-tooth-micro" + defp section_icon("interfaces"), do: "hero-globe-alt-micro" + defp section_icon(_), do: nil + + defp format_id(id) when is_binary(id), do: URI.decode(id) + defp format_id(id), do: 
to_string(id) + + @doc """ + Shows the flash group with standard titles and content. + + ## Examples + + <.flash_group flash={@flash} /> + """ + attr :flash, :map, required: true, doc: "the map of flash messages" + attr :id, :string, default: "flash-group", doc: "the optional id of flash container" + + def flash_group(assigns) do + ~H""" + <div id={@id} aria-live="polite"> + <.flash kind={:info} flash={@flash} /> + <.flash kind={:error} flash={@flash} /> + + <.flash + id="client-error" + kind={:error} + title={gettext("We can't find the internet")} + phx-disconnected={show(".phx-client-error #client-error") |> JS.remove_attribute("hidden")} + phx-connected={hide("#client-error") |> JS.set_attribute({"hidden", ""})} + hidden + > + {gettext("Attempting to reconnect")} + <.icon name="hero-arrow-path" class="ml-1 size-3 motion-safe:animate-spin" /> + </.flash> + + <.flash + id="server-error" + kind={:error} + title={gettext("Something went wrong!")} + phx-disconnected={show(".phx-server-error #server-error") |> JS.remove_attribute("hidden")} + phx-connected={hide("#server-error") |> JS.set_attribute({"hidden", ""})} + hidden + > + {gettext("Attempting to reconnect")} + <.icon name="hero-arrow-path" class="ml-1 size-3 motion-safe:animate-spin" /> + </.flash> + </div> + """ + end + + @doc """ + Provides dark vs light theme toggle based on themes defined in app.css. + + See <head> in root.html.heex which applies the theme before page load. + """ + def theme_toggle(assigns) do + ~H""" + <div class="card relative flex flex-row items-center border-2 border-base-300 bg-base-300 rounded-full"> + <div class="absolute w-1/3 h-full rounded-full border-1 border-base-200 bg-base-100 brightness-200 left-0 [[data-theme=light]_&]:left-1/3 [[data-theme=dark]_&]:left-2/3 transition-[left]" /> + + <button + class="flex p-2 cursor-pointer w-1/3" + phx-click={JS.dispatch("phx:set-theme")} + data-phx-theme="system" + > + <.icon name="hero-computer-desktop-micro" class="size-4 opacity-75 hover:opacity-100" /> + </button> + + <button + class="flex p-2 cursor-pointer w-1/3" + phx-click={JS.dispatch("phx:set-theme")} + data-phx-theme="light" + > + <.icon name="hero-sun-micro" class="size-4 opacity-75 hover:opacity-100" /> + </button> + + <button + class="flex p-2 cursor-pointer w-1/3" + phx-click={JS.dispatch("phx:set-theme")} + data-phx-theme="dark" + > + <.icon name="hero-moon-micro" class="size-4 opacity-75 hover:opacity-100" /> + </button> + </div> + """ + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/components/layouts/root.html.heex b/web-ng/lib/serviceradar_web_ng_web/components/layouts/root.html.heex new file mode 100644 index 000000000..6415387da --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/components/layouts/root.html.heex @@ -0,0 +1,36 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta charset="utf-8" /> + <meta name="viewport" content="width=device-width, initial-scale=1" /> + <meta name="csrf-token" content={get_csrf_token()} /> + <.live_title default="ServiceRadarWebNG" suffix=" · Phoenix Framework"> + {assigns[:page_title]} + </.live_title> + <link phx-track-static rel="stylesheet" href={~p"/assets/css/app.css"} /> + <script defer phx-track-static type="text/javascript" src={~p"/assets/js/app.js"}> + </script> + <script> + (() => { + const setTheme = (theme) => { + if (theme === "system") { + localStorage.removeItem("phx:theme"); + document.documentElement.removeAttribute("data-theme"); + } else { + localStorage.setItem("phx:theme", theme); + 
document.documentElement.setAttribute("data-theme", theme); + } + }; + if (!document.documentElement.hasAttribute("data-theme")) { + setTheme(localStorage.getItem("phx:theme") || "system"); + } + window.addEventListener("storage", (e) => e.key === "phx:theme" && setTheme(e.newValue || "system")); + + window.addEventListener("phx:set-theme", (e) => setTheme(e.target.dataset.phxTheme)); + })(); + </script> + </head> + <body> + {@inner_content} + </body> +</html> diff --git a/web-ng/lib/serviceradar_web_ng_web/components/srql_components.ex b/web-ng/lib/serviceradar_web_ng_web/components/srql_components.ex new file mode 100644 index 000000000..5229a2bb2 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/components/srql_components.ex @@ -0,0 +1,818 @@ +defmodule ServiceRadarWebNGWeb.SRQLComponents do + @moduledoc false + + use Phoenix.Component + + import ServiceRadarWebNGWeb.CoreComponents, only: [icon: 1] + import ServiceRadarWebNGWeb.UIComponents + + alias ServiceRadarWebNGWeb.SRQL.Catalog + + attr :query, :string, default: nil + attr :draft, :string, default: nil + attr :loading, :boolean, default: false + attr :builder_available, :boolean, default: true + attr :builder_open, :boolean, default: false + attr :builder_supported, :boolean, default: true + attr :builder_sync, :boolean, default: true + attr :builder, :map, default: %{} + + def srql_query_bar(assigns) do + assigns = + assigns + |> assign_new(:draft, fn -> assigns.query end) + |> assign_new(:builder, fn -> %{} end) + + ~H""" + <div class="w-full max-w-4xl"> + <form + phx-change="srql_change" + phx-submit="srql_submit" + class="flex items-center gap-2 w-full" + autocomplete="off" + > + <div class="flex-1 min-w-0"> + <.ui_input + type="text" + name="q" + value={@draft || ""} + placeholder="SRQL query (e.g. in:devices time:last_7d sort:last_seen:desc limit:100)" + mono + class="w-full text-xs" + /> + </div> + + <.ui_icon_button + :if={@builder_available} + active={@builder_open} + aria-label="Toggle query builder" + title="Query builder" + phx-click="srql_builder_toggle" + > + <.icon name="hero-adjustments-horizontal" class="size-4" /> + </.ui_icon_button> + + <.ui_button variant="primary" size="sm" type="submit"> + <span :if={@loading} class="loading loading-spinner loading-xs" /> Run + </.ui_button> + </form> + </div> + """ + end + + attr :id, :string, required: true + attr :rows, :list, default: [] + attr :columns, :list, default: nil + attr :max_columns, :integer, default: 10 + attr :container, :boolean, default: true + attr :class, :any, default: nil + attr :empty_message, :string, default: "No results." 
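+  # Example usage (illustrative; assumes `@rows` is a list of decoded SRQL result maps):
+  #
+  #   <.srql_results_table id="devices-results" rows={@rows} empty_message="No devices found." />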
+ + def srql_results_table(assigns) do + columns = + assigns.columns + |> normalize_columns(assigns.rows, assigns.max_columns) + + assigns = assign(assigns, :columns, columns) + + ~H""" + <div class={[ + "overflow-x-auto", + @container && "rounded-xl border border-base-200 bg-base-100 shadow-sm", + @class + ]}> + <table id={@id} class="table table-sm table-zebra w-full"> + <thead> + <tr> + <%= for col <- @columns do %> + <th class="whitespace-nowrap text-xs font-semibold text-base-content/70 bg-base-200/60"> + {col} + </th> + <% end %> + </tr> + </thead> + <tbody> + <tr :if={@rows == []}> + <td + colspan={max(length(@columns), 1)} + class="text-sm text-base-content/60 py-8 text-center" + > + {@empty_message} + </td> + </tr> + + <%= for {row, idx} <- Enum.with_index(@rows) do %> + <tr id={"#{@id}-row-#{idx}"} class="hover:bg-base-200/40"> + <%= for col <- @columns do %> + <td class="whitespace-nowrap text-xs max-w-[24rem] truncate"> + <.srql_cell col={col} value={Map.get(row, col)} /> + </td> + <% end %> + </tr> + <% end %> + </tbody> + </table> + </div> + """ + end + + attr :col, :string, required: true + attr :value, :any, default: nil + + def srql_cell(assigns) do + assigns = + assigns + |> assign(:col_key, assigns.col |> to_string() |> String.trim() |> String.downcase()) + |> assign(:formatted, format_cell(assigns.col, assigns.value)) + + ~H""" + <%= case @formatted do %> + <% {:time, %{display: display, iso: iso}} -> %> + <time datetime={iso} title={iso} class="font-mono text-[11px]"> + {display} + </time> + <% {:link, %{href: href, label: label}} -> %> + <a href={href} target="_blank" rel="noreferrer" class="link link-hover font-mono text-[11px]"> + {label} + </a> + <% {:severity, %{label: label, variant: variant}} -> %> + <.ui_badge variant={variant} size="xs">{label}</.ui_badge> + <% {:boolean, %{label: label, variant: variant}} -> %> + <.ui_badge variant={variant} size="xs">{label}</.ui_badge> + <% {:text, %{value: value, title: title}} -> %> + <span title={title}>{value}</span> + <% {:json, %{value: value, title: title}} -> %> + <span title={title} class="font-mono text-[11px]">{value}</span> + <% end %> + """ + end + + attr :viz, :any, default: :none + + def srql_auto_viz(assigns) do + ~H""" + <.ui_panel> + <:header> + <div class="min-w-0"> + <div class="text-sm font-semibold">Auto Visualization</div> + <div class="text-xs text-base-content/70"> + A best-effort visualization inferred from the SRQL result set (beta). + </div> + </div> + </:header> + + <div :if={@viz == :none} class="text-sm text-base-content/70"> + No visualization detected yet. Try a timeseries query (timestamp + numeric value) or a grouped count. 
+ </div> + + <.timeseries_viz :if={match?({:timeseries, _}, @viz)} viz={@viz} /> + <.categories_viz :if={match?({:categories, _}, @viz)} viz={@viz} /> + </.ui_panel> + """ + end + + attr :viz, :any, required: true + + defp timeseries_viz(%{viz: {:timeseries, %{x: x, y: y, points: points}}} = assigns) do + assigns = + assigns + |> assign(:x, x) + |> assign(:y, y) + |> assign(:points, points) + |> assign(:spark, sparkline(points)) + + ~H""" + <div class="flex flex-col gap-3"> + <div class="text-xs text-base-content/60"> + Timeseries: <span class="font-mono">{@y}</span> over <span class="font-mono">{@x}</span> + </div> + + <div class="rounded-lg border border-base-200 bg-base-100 p-3"> + <svg viewBox="0 0 400 120" class="w-full h-28"> + <polyline + fill="none" + stroke="currentColor" + stroke-width="2" + class="text-primary" + points={@spark} + /> + </svg> + </div> + </div> + """ + end + + defp timeseries_viz(assigns), do: assigns |> assign(:viz, :none) |> timeseries_viz() + + attr :viz, :any, required: true + + defp categories_viz( + %{viz: {:categories, %{label: label, value: value, items: items}}} = assigns + ) do + max_v = + items + |> Enum.map(fn {_k, v} -> v end) + |> Enum.max(fn -> 1 end) + + assigns = + assigns + |> assign(:label, label) + |> assign(:value, value) + |> assign(:items, items) + |> assign(:max_v, max_v) + + ~H""" + <div class="flex flex-col gap-3"> + <div class="text-xs text-base-content/60"> + Categories: <span class="font-mono">{@value}</span> by <span class="font-mono">{@label}</span> + </div> + + <div class="flex flex-col gap-2"> + <%= for {k, v} <- @items do %> + <div class="flex items-center gap-3"> + <div class="w-48 truncate text-sm" title={to_string(k)}>{format_category_label(k)}</div> + <div class="flex-1"> + <div class="h-2 rounded-full bg-base-200 overflow-hidden"> + <div + class="h-2 bg-primary/70" + style={"width: #{round((v / @max_v) * 100)}%"} + /> + </div> + </div> + <div class="w-20 text-right text-sm font-mono">{format_number(v)}</div> + </div> + <% end %> + </div> + </div> + """ + end + + defp categories_viz(assigns), do: assigns |> assign(:viz, :none) |> categories_viz() + + defp sparkline(points) when is_list(points) do + values = Enum.map(points, fn {_dt, v} -> v end) + + case {values, Enum.min(values, fn -> 0 end), Enum.max(values, fn -> 0 end)} do + {[], _, _} -> + "" + + {_values, min_v, max_v} when min_v == max_v -> + Enum.with_index(values) + |> Enum.map(fn {_v, idx} -> + x = idx_to_x(idx, length(values)) + "#{x},60" + end) + |> Enum.join(" ") + + {_values, min_v, max_v} -> + Enum.with_index(values) + |> Enum.map(fn {v, idx} -> + x = idx_to_x(idx, length(values)) + y = 110 - round((v - min_v) / (max_v - min_v) * 100) + "#{x},#{y}" + end) + |> Enum.join(" ") + end + end + + defp idx_to_x(_idx, 0), do: 0 + defp idx_to_x(0, _len), do: 0 + + defp idx_to_x(idx, len) when len > 1 do + round(idx / (len - 1) * 400) + end + + defp format_number(v) when is_float(v), do: :erlang.float_to_binary(v, decimals: 2) + defp format_number(v) when is_integer(v), do: Integer.to_string(v) + defp format_number(v), do: to_string(v) + + defp srql_columns([], _max), do: [] + + defp srql_columns([first | _], max) when is_map(first) and is_integer(max) and max > 0 do + first + |> Map.keys() + |> Enum.map(&to_string/1) + |> Enum.sort() + |> Enum.take(max) + end + + defp srql_columns(_, _max), do: [] + + defp normalize_columns(nil, rows, max_columns), do: srql_columns(rows, max_columns) + + defp normalize_columns(columns, rows, max_columns) when is_list(columns) do + 
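+    # Normalize the caller-provided column names (stringify, trim, drop blanks);
+    # if nothing usable remains, fall back to columns inferred from the first row.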
columns = + columns + |> Enum.map(&to_string/1) + |> Enum.map(&String.trim/1) + |> Enum.reject(&(&1 == "")) + + if columns == [] do + srql_columns(rows, max_columns) + else + columns + end + end + + defp normalize_columns(_columns, rows, max_columns), do: srql_columns(rows, max_columns) + + defp format_cell(col, value) do + col = col |> to_string() |> String.trim() + + cond do + is_nil(value) -> + {:text, %{value: "", title: nil}} + + is_boolean(value) -> + {:boolean, + %{ + label: if(value, do: "true", else: "false"), + variant: if(value, do: "success", else: "error") + }} + + severity_column?(col) and is_binary(value) -> + {:severity, severity_badge(value)} + + time_column?(col) and is_binary(value) -> + format_time_string(value) + + is_binary(value) -> + format_text_string(value) + + is_number(value) -> + {:text, %{value: to_string(value), title: nil}} + + is_list(value) or is_map(value) -> + rendered = + value + |> inspect(limit: 5, printable_limit: 1_000) + |> String.slice(0, 200) + + {:json, %{value: rendered, title: rendered}} + + true -> + {:text, %{value: to_string(value), title: nil}} + end + end + + defp severity_column?(col) do + col_key = col |> String.downcase() + col_key in ["severity", "severity_text", "level", "service_status"] + end + + defp time_column?(col) do + col_key = col |> String.downcase() + + String.ends_with?(col_key, "_at") or String.ends_with?(col_key, "_time") or + String.ends_with?(col_key, "_timestamp") or + col_key in ["timestamp", "event_timestamp", "last_seen", "first_seen"] + end + + defp severity_badge(value) when is_binary(value) do + normalized = value |> String.trim() |> String.downcase() + + variant = + cond do + normalized in ["critical", "fatal", "error"] -> "error" + normalized in ["warn", "warning", "high"] -> "warning" + normalized in ["info", "medium"] -> "info" + normalized in ["debug", "low", "ok", "healthy"] -> "success" + normalized in ["down", "offline", "unavailable"] -> "error" + normalized in ["up", "online", "available"] -> "success" + true -> "ghost" + end + + %{label: value, variant: variant} + end + + defp format_time_string(value) when is_binary(value) do + value = String.trim(value) + + case parse_iso8601(value) do + {:ok, dt, iso} -> + {:time, %{display: Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC"), iso: iso}} + + :error -> + format_composite_string(value) + end + end + + defp format_text_string(value) when is_binary(value) do + value = String.trim(value) + + cond do + match?({:ok, _, _}, parse_iso8601(value)) -> + {:ok, dt, iso} = parse_iso8601(value) + {:time, %{display: Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC"), iso: iso}} + + url?(value) -> + {:link, %{href: value, label: url_label(value)}} + + true -> + format_composite_string(value) + end + end + + defp format_composite_string(value) when is_binary(value) do + case String.split(value, ",", parts: 2) do + [left, right] -> + left = String.trim(left) + right = String.trim(right) + + with {:ok, dt, _iso} <- parse_iso8601(left) do + label = url_label(right) + + if url?(right) do + {:text, + %{ + value: "#{Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC")} · #{label}", + title: value + }} + else + {:text, + %{ + value: "#{Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC")} · #{right}", + title: value + }} + end + else + _ -> {:text, %{value: value, title: value}} + end + + _ -> + {:text, %{value: value, title: value}} + end + end + + defp parse_iso8601(value) when is_binary(value) do + value = String.trim(value) + + cond do + value == "" -> + :error + + true -> + case 
DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt, DateTime.to_iso8601(dt)} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> + dt = DateTime.from_naive!(ndt, "Etc/UTC") + {:ok, dt, DateTime.to_iso8601(dt)} + + {:error, _} -> + :error + end + end + end + end + + defp url?(value) when is_binary(value) do + String.starts_with?(value, "http://") or String.starts_with?(value, "https://") + end + + defp url_label(value) when is_binary(value) do + uri = URI.parse(value) + + host = + case uri.host do + nil -> + value + + other -> + port = + case {uri.scheme, uri.port} do + {"http", nil} -> nil + {"https", nil} -> nil + {"http", 80} -> nil + {"https", 443} -> nil + {_scheme, port} -> port + end + + if is_integer(port) do + "#{other}:#{port}" + else + other + end + end + + path = + case uri.path do + nil -> "" + "/" -> "" + other -> other + end + + label = host <> path + + if is_binary(uri.query) and uri.query != "" do + label <> "?…" + else + label + end + end + + defp format_category_label(value) when is_binary(value) do + value = String.trim(value) + + cond do + match?({:ok, _, _}, parse_iso8601(value)) -> + {:ok, dt, _iso} = parse_iso8601(value) + Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC") + + url?(value) -> + url_label(value) + + true -> + value + end + end + + defp format_category_label(value), do: to_string(value) + + attr :supported, :boolean, default: true + attr :sync, :boolean, default: true + attr :builder, :map, default: %{} + + def srql_query_builder(assigns) do + assigns = assign_new(assigns, :builder, fn -> %{} end) + + entity = Map.get(assigns.builder, "entity", "devices") + config = Catalog.entity(entity) + supports_downsample = Map.get(config, :downsample, false) + series_fields = Map.get(config, :series_fields, []) + + assigns = + assigns + |> assign(:entities, Catalog.entities()) + |> assign(:config, config) + |> assign(:supports_downsample, supports_downsample) + |> assign(:series_fields, series_fields) + + ~H""" + <.ui_panel> + <:header> + <div class="min-w-0"> + <div class="text-sm font-semibold">Query Builder</div> + <div class="text-xs text-base-content/70"> + Compose a query visually. SRQL text remains the source of truth. + </div> + </div> + + <div class="shrink-0 flex items-center gap-2"> + <.ui_badge :if={not @supported} variant="warning" size="sm">Limited</.ui_badge> + <.ui_badge :if={@supported and not @sync} size="sm">Not applied</.ui_badge> + + <.ui_button + :if={not @supported or not @sync} + size="sm" + variant="ghost" + type="button" + phx-click="srql_builder_apply" + > + Replace query + </.ui_button> + </div> + </:header> + + <div :if={not @supported} class="mb-3 text-xs text-warning"> + This SRQL query can’t be fully represented by the builder yet. The builder won’t overwrite your query unless you + click “Replace query”. 
+ </div> + + <form phx-change="srql_builder_change" autocomplete="off" class="overflow-x-auto"> + <div class="min-w-[880px]"> + <div class="flex items-start gap-10"> + <div class="flex flex-col items-start gap-5"> + <.srql_builder_pill label="In" root> + <.ui_inline_select name="builder[entity]" disabled={not @supported}> + <%= for e <- @entities do %> + <option value={e.id} selected={@builder["entity"] == e.id}> + {e.label} + </option> + <% end %> + </.ui_inline_select> + </.srql_builder_pill> + + <div class="pl-10 border-l-2 border-primary/30 flex flex-col gap-5"> + <.srql_builder_pill label="Time"> + <.ui_inline_select name="builder[time]" disabled={not @supported}> + <option value="" selected={(@builder["time"] || "") == ""}>Any</option> + <option value="last_1h" selected={@builder["time"] == "last_1h"}> + Last 1h + </option> + <option value="last_24h" selected={@builder["time"] == "last_24h"}> + Last 24h + </option> + <option value="last_7d" selected={@builder["time"] == "last_7d"}> + Last 7d + </option> + <option value="last_30d" selected={@builder["time"] == "last_30d"}> + Last 30d + </option> + </.ui_inline_select> + </.srql_builder_pill> + + <div :if={@supports_downsample} class="flex flex-wrap items-center gap-4"> + <div class="text-xs text-base-content/60 font-medium">Downsample</div> + + <.srql_builder_pill label="Bucket"> + <.ui_inline_select name="builder[bucket]" disabled={not @supported}> + <option value="" selected={(@builder["bucket"] || "") == ""}> + (none) + </option> + <option value="15s" selected={@builder["bucket"] == "15s"}>15s</option> + <option value="1m" selected={@builder["bucket"] == "1m"}>1m</option> + <option value="5m" selected={@builder["bucket"] == "5m"}>5m</option> + <option value="15m" selected={@builder["bucket"] == "15m"}>15m</option> + <option value="1h" selected={@builder["bucket"] == "1h"}>1h</option> + <option value="6h" selected={@builder["bucket"] == "6h"}>6h</option> + <option value="1d" selected={@builder["bucket"] == "1d"}>1d</option> + </.ui_inline_select> + </.srql_builder_pill> + + <.srql_builder_pill label="Agg"> + <.ui_inline_select name="builder[agg]" disabled={not @supported}> + <option value="avg" selected={(@builder["agg"] || "avg") == "avg"}>avg</option> + <option value="min" selected={@builder["agg"] == "min"}>min</option> + <option value="max" selected={@builder["agg"] == "max"}>max</option> + <option value="sum" selected={@builder["agg"] == "sum"}>sum</option> + <option value="count" selected={@builder["agg"] == "count"}>count</option> + </.ui_inline_select> + </.srql_builder_pill> + + <.srql_builder_pill label="Series"> + <%= if @series_fields == [] do %> + <.ui_inline_input + type="text" + name="builder[series]" + value={@builder["series"] || ""} + placeholder="field" + class="w-40 placeholder:text-base-content/40" + disabled={not @supported} + /> + <% else %> + <.ui_inline_select name="builder[series]" disabled={not @supported}> + <option value="" selected={(@builder["series"] || "") == ""}> + (none) + </option> + <%= for field <- @series_fields do %> + <option value={field} selected={@builder["series"] == field}> + {field} + </option> + <% end %> + </.ui_inline_select> + <% end %> + </.srql_builder_pill> + </div> + + <div class="flex flex-col gap-3"> + <div class="text-xs text-base-content/60 font-medium">Filters</div> + + <div class="flex flex-col gap-3"> + <%= for {filter, idx} <- Enum.with_index(Map.get(@builder, "filters", [])) do %> + <div class="flex items-center gap-3"> + <.srql_builder_pill label="Filter"> + <%= if 
@config.filter_fields == [] do %> + <.ui_inline_input + type="text" + name={"builder[filters][#{idx}][field]"} + value={filter["field"] || ""} + placeholder="field" + class="w-40 placeholder:text-base-content/40" + disabled={not @supported} + /> + <% else %> + <.ui_inline_select + name={"builder[filters][#{idx}][field]"} + disabled={not @supported} + > + <%= for field <- @config.filter_fields do %> + <option value={field} selected={filter["field"] == field}> + {field} + </option> + <% end %> + </.ui_inline_select> + <% end %> + + <.ui_inline_select + name={"builder[filters][#{idx}][op]"} + disabled={not @supported} + class="text-xs text-base-content/70" + > + <option + value="contains" + selected={(filter["op"] || "contains") == "contains"} + > + contains + </option> + <option value="not_contains" selected={filter["op"] == "not_contains"}> + does not contain + </option> + <option value="equals" selected={filter["op"] == "equals"}> + equals + </option> + <option value="not_equals" selected={filter["op"] == "not_equals"}> + does not equal + </option> + </.ui_inline_select> + + <.ui_inline_input + type="text" + name={"builder[filters][#{idx}][value]"} + value={filter["value"] || ""} + placeholder="value" + class="placeholder:text-base-content/40 w-56" + disabled={not @supported} + /> + </.srql_builder_pill> + + <.ui_icon_button + size="xs" + disabled={not @supported} + aria-label="Remove filter" + title="Remove filter" + phx-click="srql_builder_remove_filter" + phx-value-idx={idx} + > + <.icon name="hero-x-mark" class="size-4" /> + </.ui_icon_button> + </div> + <% end %> + + <button + type="button" + class="inline-flex items-center gap-2 rounded-md border border-dashed border-primary/40 px-3 py-2 text-sm text-primary/80 hover:bg-primary/5 w-fit disabled:opacity-60" + phx-click="srql_builder_add_filter" + disabled={not @supported} + > + <.icon name="hero-plus" class="size-4" /> Add filter + </button> + </div> + </div> + + <div class="flex items-center gap-4 pt-2"> + <div class="text-xs text-base-content/60 font-medium">Sort</div> + <.srql_builder_pill label="Sort"> + <.ui_inline_input + type="text" + name="builder[sort_field]" + value={@builder["sort_field"] || ""} + class="w-44" + disabled={not @supported} + /> + <.ui_inline_select name="builder[sort_dir]" disabled={not @supported}> + <option value="desc" selected={(@builder["sort_dir"] || "desc") == "desc"}> + desc + </option> + <option value="asc" selected={@builder["sort_dir"] == "asc"}>asc</option> + </.ui_inline_select> + </.srql_builder_pill> + + <div class="text-xs text-base-content/60 font-medium">Limit</div> + <.srql_builder_pill label="Limit"> + <.ui_inline_input + type="number" + name="builder[limit]" + value={@builder["limit"] || ""} + min="1" + max="500" + class="w-24" + disabled={not @supported} + /> + </.srql_builder_pill> + </div> + + <div class="flex items-center gap-3 pt-4 mt-4 border-t border-base-200"> + <.ui_button variant="primary" size="sm" type="button" phx-click="srql_builder_run"> + Run Query + </.ui_button> + </div> + </div> + </div> + </div> + </div> + </form> + </.ui_panel> + """ + end + + slot :inner_block, required: true + attr :label, :string, required: true + attr :root, :boolean, default: false + + def srql_builder_pill(assigns) do + ~H""" + <div class="relative"> + <div :if={not @root} class="absolute -left-10 top-1/2 h-0.5 w-10 bg-primary/30" /> + <div class="inline-flex items-center gap-2 rounded-md border border-base-300 bg-base-100 px-3 py-2 shadow-sm"> + <.icon name="hero-check-mini" class="size-4 
text-success opacity-80" /> + <span class="text-xs text-base-content/60">{@label}</span> + {render_slot(@inner_block)} + </div> + </div> + """ + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/components/ui_components.ex b/web-ng/lib/serviceradar_web_ng_web/components/ui_components.ex new file mode 100644 index 000000000..a14821a89 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/components/ui_components.ex @@ -0,0 +1,384 @@ +defmodule ServiceRadarWebNGWeb.UIComponents do + @moduledoc """ + App-level UI primitives built on Tailwind + daisyUI. + + Keep these components small and composable so we can swap/adjust styling + without touching feature templates. + """ + + use Phoenix.Component + import ServiceRadarWebNGWeb.CoreComponents, only: [icon: 1] + + attr :variant, :string, + default: "primary", + values: ~w(primary ghost soft neutral outline) + + attr :size, :string, default: "sm", values: ~w(xs sm md lg) + attr :square, :boolean, default: false + attr :active, :boolean, default: false + attr :class, :any, default: nil + + attr :rest, :global, include: ~w( + href navigate patch method download name value type disabled form + phx-click phx-value-idx phx-value-id phx-value-entity + aria-label aria-controls aria-expanded title + ) + + slot :inner_block, required: true + + def ui_button(%{rest: rest} = assigns) do + link_target = rest[:href] || rest[:navigate] || rest[:patch] + + assigns = + assigns + |> assign(:computed_class, ui_button_class(assigns)) + |> assign(:link?, link_target != nil) + + ~H""" + <.link :if={@link?} class={@computed_class} {@rest}> + {render_slot(@inner_block)} + </.link> + <button :if={not @link?} class={@computed_class} {@rest}> + {render_slot(@inner_block)} + </button> + """ + end + + attr :variant, :string, + default: "ghost", + values: ~w(primary ghost soft neutral outline) + + attr :size, :string, default: "sm", values: ~w(xs sm md lg) + attr :active, :boolean, default: false + attr :class, :any, default: nil + + attr :rest, :global, include: ~w( + href navigate patch method download name value type disabled form + phx-click phx-value-idx phx-value-id phx-value-entity + aria-label aria-controls aria-expanded title + ) + + slot :inner_block, required: true + + def ui_icon_button(assigns) do + assigns = + assigns + |> assign(:rest, Map.put_new(assigns.rest, :type, "button")) + |> assign(:square, true) + + ~H""" + <.ui_button + variant={@variant} + size={@size} + square={@square} + active={@active} + class={@class} + {@rest} + > + {render_slot(@inner_block)} + </.ui_button> + """ + end + + attr :variant, :string, default: "bordered", values: ~w(bordered ghost) + attr :size, :string, default: "sm", values: ~w(xs sm md lg) + attr :mono, :boolean, default: false + attr :class, :any, default: nil + + attr :rest, :global, include: ~w( + name value type placeholder autocomplete disabled form min max step inputmode + phx-debounce phx-throttle + ) + + def ui_input(assigns) do + assigns = assign(assigns, :computed_class, ui_input_class(assigns)) + + ~H""" + <input class={@computed_class} {@rest} /> + """ + end + + attr :class, :any, default: nil + attr :rest, :global, include: ~w(name value disabled) + + slot :inner_block, required: true + + def ui_inline_select(assigns) do + ~H""" + <select + class={["bg-transparent text-sm font-medium outline-none disabled:opacity-60", @class]} + {@rest} + > + {render_slot(@inner_block)} + </select> + """ + end + + attr :class, :any, default: nil + attr :rest, :global, include: ~w(name value type placeholder disabled min 
max step) + + def ui_inline_input(assigns) do + ~H""" + <input + class={["bg-transparent text-sm font-medium outline-none disabled:opacity-60", @class]} + {@rest} + /> + """ + end + + attr :variant, :string, default: "ghost", values: ~w(ghost warning success error info) + attr :size, :string, default: "sm", values: ~w(xs sm md) + attr :class, :any, default: nil + attr :rest, :global + slot :inner_block, required: true + + def ui_badge(assigns) do + assigns = assign(assigns, :computed_class, ui_badge_class(assigns)) + + ~H""" + <span class={@computed_class} {@rest}>{render_slot(@inner_block)}</span> + """ + end + + attr :class, :any, default: nil + slot :left + slot :right + slot :inner_block + + def ui_toolbar(assigns) do + ~H""" + <div class={["flex items-center justify-between gap-3", @class]}> + <div class="flex items-center gap-2 min-w-0"> + {render_slot(@left)} + {render_slot(@inner_block)} + </div> + <div class="flex items-center gap-2 shrink-0"> + {render_slot(@right)} + </div> + </div> + """ + end + + attr :tabs, :list, required: true + attr :size, :string, default: "sm", values: ~w(xs sm md) + attr :class, :any, default: nil + + def ui_tabs(assigns) do + ~H""" + <nav class={["flex items-center gap-1", @class]}> + <%= for tab <- @tabs do %> + <.ui_button + size={@size} + variant={Map.get(tab, :variant, "ghost")} + active={Map.get(tab, :active, false)} + href={Map.get(tab, :href)} + patch={Map.get(tab, :patch)} + navigate={Map.get(tab, :navigate)} + > + {Map.get(tab, :label)} + </.ui_button> + <% end %> + </nav> + """ + end + + attr :align, :string, default: "end", values: ~w(start end) + attr :class, :any, default: nil + slot :trigger, required: true + slot :item, required: true + + def ui_dropdown(assigns) do + ~H""" + <div class={[ + "dropdown", + @align == "start" && "dropdown-start", + @align == "end" && "dropdown-end", + @class + ]}> + <div tabindex="0" role="button"> + {render_slot(@trigger)} + </div> + <ul + tabindex="0" + class="menu dropdown-content bg-base-100 rounded-box z-30 w-56 p-2 shadow border border-base-200 mt-2" + > + <%= for item <- @item do %> + <li>{render_slot(item)}</li> + <% end %> + </ul> + </div> + """ + end + + attr :class, :any, default: nil + attr :header_class, :any, default: nil + attr :body_class, :any, default: nil + + slot :header + slot :inner_block, required: true + + def ui_panel(assigns) do + ~H""" + <section class={[ + "rounded-xl border border-base-200 bg-base-100 shadow-sm overflow-hidden", + @class + ]}> + <header + :if={@header != []} + class={[ + "px-4 py-3 bg-base-200/40 flex items-start justify-between gap-3", + @header_class + ]} + > + {render_slot(@header)} + </header> + <div class={["px-4 py-4", @body_class]}> + {render_slot(@inner_block)} + </div> + </section> + """ + end + + defp ui_button_class(assigns) do + [ + "btn", + ui_button_variant_class(assigns.variant), + ui_button_size_class(assigns.size), + assigns.square && "btn-square", + assigns.active && "btn-active", + assigns.class + ] + end + + defp ui_button_variant_class("primary"), do: "btn-primary" + defp ui_button_variant_class("ghost"), do: "btn-ghost" + defp ui_button_variant_class("neutral"), do: "btn-neutral" + defp ui_button_variant_class("outline"), do: "btn-outline" + defp ui_button_variant_class("soft"), do: "btn-primary btn-soft" + defp ui_button_variant_class(_), do: "btn-primary" + + defp ui_button_size_class("xs"), do: "btn-xs" + defp ui_button_size_class("sm"), do: "btn-sm" + defp ui_button_size_class("md"), do: "btn-md" + defp ui_button_size_class("lg"), do: 
"btn-lg" + defp ui_button_size_class(_), do: "btn-sm" + + defp ui_input_class(assigns) do + [ + "input", + ui_input_variant_class(assigns.variant), + ui_input_size_class(assigns.size), + assigns.mono && "font-mono", + assigns.class + ] + end + + defp ui_input_variant_class("ghost"), do: "input-ghost" + defp ui_input_variant_class(_), do: "input-bordered" + + defp ui_input_size_class("xs"), do: "input-xs" + defp ui_input_size_class("sm"), do: "input-sm" + defp ui_input_size_class("md"), do: "input-md" + defp ui_input_size_class("lg"), do: "input-lg" + defp ui_input_size_class(_), do: "input-sm" + + defp ui_badge_class(assigns) do + [ + "badge", + ui_badge_variant_class(assigns.variant), + ui_badge_size_class(assigns.size), + assigns.class + ] + end + + defp ui_badge_variant_class("warning"), do: "badge-warning" + defp ui_badge_variant_class("success"), do: "badge-success" + defp ui_badge_variant_class("error"), do: "badge-error" + defp ui_badge_variant_class("info"), do: "badge-info" + defp ui_badge_variant_class(_), do: "badge-ghost" + + defp ui_badge_size_class("xs"), do: "badge-xs" + defp ui_badge_size_class("sm"), do: "badge-sm" + defp ui_badge_size_class("md"), do: "badge-md" + defp ui_badge_size_class(_), do: "badge-sm" + + @doc """ + Cursor-based pagination component for SRQL-driven pages. + + Uses daisyUI join/button classes for styling. + """ + attr :prev_cursor, :string, default: nil + attr :next_cursor, :string, default: nil + attr :base_path, :string, required: true + attr :query, :string, default: "" + attr :limit, :integer, default: 20 + attr :result_count, :integer, default: 0 + attr :extra_params, :map, default: %{} + attr :class, :any, default: nil + + def ui_pagination(assigns) do + assigns = + assigns + |> assign(:has_prev, is_binary(assigns.prev_cursor) and assigns.prev_cursor != "") + |> assign(:has_next, is_binary(assigns.next_cursor) and assigns.next_cursor != "") + |> assign(:showing_text, pagination_text(assigns.result_count, assigns.limit)) + + ~H""" + <div class={["flex items-center justify-between gap-4", @class]}> + <div class="text-sm text-base-content/60"> + {@showing_text} + </div> + <div class="join"> + <.link + :if={@has_prev} + patch={pagination_href(@base_path, @query, @limit, @prev_cursor, @extra_params)} + class="join-item btn btn-sm btn-outline" + > + <.icon name="hero-chevron-left" class="size-4" /> Previous + </.link> + <button :if={not @has_prev} class="join-item btn btn-sm btn-outline" disabled> + <.icon name="hero-chevron-left" class="size-4" /> Previous + </button> + + <.link + :if={@has_next} + patch={pagination_href(@base_path, @query, @limit, @next_cursor, @extra_params)} + class="join-item btn btn-sm btn-outline" + > + Next <.icon name="hero-chevron-right" class="size-4" /> + </.link> + <button :if={not @has_next} class="join-item btn btn-sm btn-outline" disabled> + Next <.icon name="hero-chevron-right" class="size-4" /> + </button> + </div> + </div> + """ + end + + defp pagination_href(base_path, query, limit, cursor, extra_params) do + base = + extra_params + |> normalize_query_params() + |> Map.merge(%{"q" => query, "limit" => limit, "cursor" => cursor}) + + base_path <> "?" 
<> URI.encode_query(base) + end + + defp normalize_query_params(%{} = params) do + params + |> Enum.reduce(%{}, fn + {k, v}, acc when is_atom(k) -> Map.put(acc, Atom.to_string(k), v) + {k, v}, acc when is_binary(k) -> Map.put(acc, k, v) + _, acc -> acc + end) + |> Map.reject(fn {_k, v} -> is_nil(v) or v == "" end) + end + + defp normalize_query_params(_), do: %{} + + defp pagination_text(count, _limit) when is_integer(count) and count > 0 do + "Showing #{count} result#{if count != 1, do: "s", else: ""}" + end + + defp pagination_text(_, _), do: "No results" +end diff --git a/web-ng/lib/serviceradar_web_ng_web/controllers/error_html.ex b/web-ng/lib/serviceradar_web_ng_web/controllers/error_html.ex new file mode 100644 index 000000000..4fb21a3f3 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/controllers/error_html.ex @@ -0,0 +1,24 @@ +defmodule ServiceRadarWebNGWeb.ErrorHTML do + @moduledoc """ + This module is invoked by your endpoint in case of errors on HTML requests. + + See config/config.exs. + """ + use ServiceRadarWebNGWeb, :html + + # If you want to customize your error pages, + # uncomment the embed_templates/1 call below + # and add pages to the error directory: + # + # * lib/serviceradar_web_ng_web/controllers/error_html/404.html.heex + # * lib/serviceradar_web_ng_web/controllers/error_html/500.html.heex + # + # embed_templates "error_html/*" + + # The default is to render a plain text page based on + # the template name. For example, "404.html" becomes + # "Not Found". + def render(template, _assigns) do + Phoenix.Controller.status_message_from_template(template) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/controllers/error_json.ex b/web-ng/lib/serviceradar_web_ng_web/controllers/error_json.ex new file mode 100644 index 000000000..00b6b89e0 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/controllers/error_json.ex @@ -0,0 +1,21 @@ +defmodule ServiceRadarWebNGWeb.ErrorJSON do + @moduledoc """ + This module is invoked by your endpoint in case of errors on JSON requests. + + See config/config.exs. + """ + + # If you want to customize a particular status code, + # you may add your own clauses, such as: + # + # def render("500.json", _assigns) do + # %{errors: %{detail: "Internal Server Error"}} + # end + + # By default, Phoenix returns the status message from + # the template name. For example, "404.json" becomes + # "Not Found". 
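+  # As a minimal sketch of the clause below (not an exhaustive contract):
+  # `render("404.json", %{})` returns `%{errors: %{detail: "Not Found"}}`.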
+ def render(template, _assigns) do + %{errors: %{detail: Phoenix.Controller.status_message_from_template(template)}} + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/controllers/page_controller.ex b/web-ng/lib/serviceradar_web_ng_web/controllers/page_controller.ex new file mode 100644 index 000000000..676db4e56 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/controllers/page_controller.ex @@ -0,0 +1,15 @@ +defmodule ServiceRadarWebNGWeb.PageController do + use ServiceRadarWebNGWeb, :controller + + def home(conn, _params) do + if conn.assigns.current_scope && conn.assigns.current_scope.user do + redirect(conn, to: ~p"/analytics") + else + render(conn, :home) + end + end + + def redirect_to_analytics(conn, _params) do + redirect(conn, to: ~p"/analytics") + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/controllers/page_html.ex b/web-ng/lib/serviceradar_web_ng_web/controllers/page_html.ex new file mode 100644 index 000000000..bad992cf1 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/controllers/page_html.ex @@ -0,0 +1,10 @@ +defmodule ServiceRadarWebNGWeb.PageHTML do + @moduledoc """ + This module contains pages rendered by PageController. + + See the `page_html` directory for all templates available. + """ + use ServiceRadarWebNGWeb, :html + + embed_templates "page_html/*" +end diff --git a/web-ng/lib/serviceradar_web_ng_web/controllers/page_html/home.html.heex b/web-ng/lib/serviceradar_web_ng_web/controllers/page_html/home.html.heex new file mode 100644 index 000000000..b107fd01b --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/controllers/page_html/home.html.heex @@ -0,0 +1,202 @@ +<Layouts.flash_group flash={@flash} /> +<div class="left-[40rem] fixed inset-y-0 right-0 z-0 hidden lg:block xl:left-[50rem]"> + <svg + viewBox="0 0 1480 957" + fill="none" + aria-hidden="true" + class="absolute inset-0 h-full w-full" + preserveAspectRatio="xMinYMid slice" + > + <path fill="#EE7868" d="M0 0h1480v957H0z" /> + <path + d="M137.542 466.27c-582.851-48.41-988.806-82.127-1608.412 658.2l67.39 810 3083.15-256.51L1535.94-49.622l-98.36 8.183C1269.29 281.468 734.115 515.799 146.47 467.012l-8.928-.742Z" + fill="#FF9F92" + /> + <path + d="M371.028 528.664C-169.369 304.988-545.754 149.198-1361.45 665.565l-182.58 792.025 3014.73 694.98 389.42-1689.25-96.18-22.171C1505.28 697.438 924.153 757.586 379.305 532.09l-8.277-3.426Z" + fill="#FA8372" + /> + <path + d="M359.326 571.714C-104.765 215.795-428.003-32.102-1349.55 255.554l-282.3 1224.596 3047.04 722.01 312.24-1354.467C1411.25 1028.3 834.355 935.995 366.435 577.166l-7.109-5.452Z" + fill="#E96856" + fill-opacity=".6" + /> + <path + d="M1593.87 1236.88c-352.15 92.63-885.498-145.85-1244.602-613.557l-5.455-7.105C-12.347 152.31-260.41-170.8-1225-131.458l-368.63 1599.048 3057.19 704.76 130.31-935.47Z" + fill="#C42652" + fill-opacity=".2" + /> + <path + d="M1411.91 1526.93c-363.79 15.71-834.312-330.6-1085.883-863.909l-3.822-8.102C72.704 125.95-101.074-242.476-1052.01-408.907l-699.85 1484.267 2837.75 1338.01 326.02-886.44Z" + fill="#A41C42" + fill-opacity=".2" + /> + <path + d="M1116.26 1863.69c-355.457-78.98-720.318-535.27-825.287-1115.521l-1.594-8.816C185.286 163.833 112.786-237.016-762.678-643.898L-1822.83 608.665 571.922 2635.55l544.338-771.86Z" + fill="#A41C42" + fill-opacity=".2" + /> + </svg> +</div> +<div class="px-4 py-10 sm:px-6 sm:py-28 lg:px-8 xl:px-28 xl:py-32"> + <div class="mx-auto max-w-xl lg:mx-0"> + <svg viewBox="0 0 71 48" class="h-12" aria-hidden="true"> + <path + d="m26.371 
33.477-.552-.1c-3.92-.729-6.397-3.1-7.57-6.829-.733-2.324.597-4.035 3.035-4.148 1.995-.092 3.362 1.055 4.57 2.39 1.557 1.72 2.984 3.558 4.514 5.305 2.202 2.515 4.797 4.134 8.347 3.634 3.183-.448 5.958-1.725 8.371-3.828.363-.316.761-.592 1.144-.886l-.241-.284c-2.027.63-4.093.841-6.205.735-3.195-.16-6.24-.828-8.964-2.582-2.486-1.601-4.319-3.746-5.19-6.611-.704-2.315.736-3.934 3.135-3.6.948.133 1.746.56 2.463 1.165.583.493 1.143 1.015 1.738 1.493 2.8 2.25 6.712 2.375 10.265-.068-5.842-.026-9.817-3.24-13.308-7.313-1.366-1.594-2.7-3.216-4.095-4.785-2.698-3.036-5.692-5.71-9.79-6.623C12.8-.623 7.745.14 2.893 2.361 1.926 2.804.997 3.319 0 4.149c.494 0 .763.006 1.032 0 2.446-.064 4.28 1.023 5.602 3.024.962 1.457 1.415 3.104 1.761 4.798.513 2.515.247 5.078.544 7.605.761 6.494 4.08 11.026 10.26 13.346 2.267.852 4.591 1.135 7.172.555ZM10.751 3.852c-.976.246-1.756-.148-2.56-.962 1.377-.343 2.592-.476 3.897-.528-.107.848-.607 1.306-1.336 1.49Zm32.002 37.924c-.085-.626-.62-.901-1.04-1.228-1.857-1.446-4.03-1.958-6.333-2-1.375-.026-2.735-.128-4.031-.61-.595-.22-1.26-.505-1.244-1.272.015-.78.693-1 1.31-1.184.505-.15 1.026-.247 1.6-.382-1.46-.936-2.886-1.065-4.787-.3-2.993 1.202-5.943 1.06-8.926-.017-1.684-.608-3.179-1.563-4.735-2.408l-.043.03a2.96 2.96 0 0 0 .04-.029c-.038-.117-.107-.12-.197-.054l.122.107c1.29 2.115 3.034 3.817 5.004 5.271 3.793 2.8 7.936 4.471 12.784 3.73A66.714 66.714 0 0 1 37 40.877c1.98-.16 3.866.398 5.753.899Zm-9.14-30.345c-.105-.076-.206-.266-.42-.069 1.745 2.36 3.985 4.098 6.683 5.193 4.354 1.767 8.773 2.07 13.293.51 3.51-1.21 6.033-.028 7.343 3.38.19-3.955-2.137-6.837-5.843-7.401-2.084-.318-4.01.373-5.962.94-5.434 1.575-10.485.798-15.094-2.553Zm27.085 15.425c.708.059 1.416.123 2.124.185-1.6-1.405-3.55-1.517-5.523-1.404-3.003.17-5.167 1.903-7.14 3.972-1.739 1.824-3.31 3.87-5.903 4.604.043.078.054.117.066.117.35.005.699.021 1.047.005 3.768-.17 7.317-.965 10.14-3.7.89-.86 1.685-1.817 2.544-2.71.716-.746 1.584-1.159 2.645-1.07Zm-8.753-4.67c-2.812.246-5.254 1.409-7.548 2.943-1.766 1.18-3.654 1.738-5.776 1.37-.374-.066-.75-.114-1.124-.17l-.013.156c.135.07.265.151.405.207.354.14.702.308 1.07.395 4.083.971 7.992.474 11.516-1.803 2.221-1.435 4.521-1.707 7.013-1.336.252.038.503.083.756.107.234.022.479.255.795.003-2.179-1.574-4.526-2.096-7.094-1.872Zm-10.049-9.544c1.475.051 2.943-.142 4.486-1.059-.452.04-.643.04-.827.076-2.126.424-4.033-.04-5.733-1.383-.623-.493-1.257-.974-1.889-1.457-2.503-1.914-5.374-2.555-8.514-2.5.05.154.054.26.108.315 3.417 3.455 7.371 5.836 12.369 6.008Zm24.727 17.731c-2.114-2.097-4.952-2.367-7.578-.537 1.738.078 3.043.632 4.101 1.728.374.388.763.768 1.182 1.106 1.6 1.29 4.311 1.352 5.896.155-1.861-.726-1.861-.726-3.601-2.452Zm-21.058 16.06c-1.858-3.46-4.981-4.24-8.59-4.008a9.667 9.667 0 0 1 2.977 1.39c.84.586 1.547 1.311 2.243 2.055 1.38 1.473 3.534 2.376 4.962 2.07-.656-.412-1.238-.848-1.592-1.507Zm17.29-19.32c0-.023.001-.045.003-.068l-.006.006.006-.006-.036-.004.021.018.012.053Zm-20 14.744a7.61 7.61 0 0 0-.072-.041.127.127 0 0 0 .015.043c.005.008.038 0 .058-.002Zm-.072-.041-.008-.034-.008.01.008-.01-.022-.006.005.026.024.014Z" + fill="#FD4F00" + /> + </svg> + <div class="mt-10 flex justify-between items-center"> + <h1 class="flex items-center text-sm font-semibold leading-6"> + Phoenix Framework + <small class="badge badge-warning badge-sm ml-3"> + v{Application.spec(:phoenix, :vsn)} + </small> + </h1> + <Layouts.theme_toggle /> + </div> + + <p class="text-[2rem] mt-4 font-semibold leading-10 tracking-tighter text-balance"> + Peace of mind from prototype to 
production. + </p> + <p class="mt-4 leading-7 text-base-content/70"> + Build rich, interactive web applications quickly, with less code and fewer moving parts. Join our growing community of developers using Phoenix to craft APIs, HTML5 apps and more, for fun or at scale. + </p> + <div class="flex"> + <div class="w-full sm:w-auto"> + <div class="mt-10 grid grid-cols-1 gap-x-6 gap-y-4 sm:grid-cols-3"> + <a + href="https://hexdocs.pm/phoenix/overview.html" + class="group relative rounded-box px-6 py-4 text-sm font-semibold leading-6 sm:py-6" + > + <span class="absolute inset-0 rounded-box bg-base-200 transition group-hover:bg-base-300 sm:group-hover:scale-105"> + </span> + <span class="relative flex items-center gap-4 sm:flex-col"> + <svg viewBox="0 0 24 24" fill="none" aria-hidden="true" class="h-6 w-6"> + <path d="m12 4 10-2v18l-10 2V4Z" fill="currentColor" fill-opacity=".15" /> + <path + d="M12 4 2 2v18l10 2m0-18v18m0-18 10-2v18l-10 2" + stroke="currentColor" + stroke-width="2" + stroke-linecap="round" + stroke-linejoin="round" + /> + </svg> + Guides & Docs + </span> + </a> + <a + href="https://github.com/phoenixframework/phoenix" + class="group relative rounded-box px-6 py-4 text-sm font-semibold leading-6 sm:py-6" + > + <span class="absolute inset-0 rounded-box bg-base-200 transition group-hover:bg-base-300 sm:group-hover:scale-105"> + </span> + <span class="relative flex items-center gap-4 sm:flex-col"> + <svg viewBox="0 0 24 24" aria-hidden="true" class="h-6 w-6"> + <path + fill="currentColor" + fill-rule="evenodd" + clip-rule="evenodd" + d="M12 0C5.37 0 0 5.506 0 12.303c0 5.445 3.435 10.043 8.205 11.674.6.107.825-.262.825-.585 0-.292-.015-1.261-.015-2.291C6 21.67 5.22 20.346 4.98 19.654c-.135-.354-.72-1.446-1.23-1.738-.42-.23-1.02-.8-.015-.815.945-.015 1.62.892 1.845 1.261 1.08 1.86 2.805 1.338 3.495 1.015.105-.8.42-1.338.765-1.645-2.67-.308-5.46-1.37-5.46-6.075 0-1.338.465-2.446 1.23-3.307-.12-.308-.54-1.569.12-3.26 0 0 1.005-.323 3.3 1.26.96-.276 1.98-.415 3-.415s2.04.139 3 .416c2.295-1.6 3.3-1.261 3.3-1.261.66 1.691.24 2.952.12 3.26.765.861 1.23 1.953 1.23 3.307 0 4.721-2.805 5.767-5.475 6.075.435.384.81 1.122.81 2.276 0 1.645-.015 2.968-.015 3.383 0 .323.225.707.825.585a12.047 12.047 0 0 0 5.919-4.489A12.536 12.536 0 0 0 24 12.304C24 5.505 18.63 0 12 0Z" + /> + </svg> + Source Code + </span> + </a> + <a + href={"https://github.com/phoenixframework/phoenix/blob/v#{Application.spec(:phoenix, :vsn)}/CHANGELOG.md"} + class="group relative rounded-box px-6 py-4 text-sm font-semibold leading-6 sm:py-6" + > + <span class="absolute inset-0 rounded-box bg-base-200 transition group-hover:bg-base-300 sm:group-hover:scale-105"> + </span> + <span class="relative flex items-center gap-4 sm:flex-col"> + <svg viewBox="0 0 24 24" fill="none" aria-hidden="true" class="h-6 w-6"> + <path + d="M12 1v6M12 17v6" + stroke="currentColor" + stroke-width="2" + stroke-linecap="round" + stroke-linejoin="round" + /> + <circle + cx="12" + cy="12" + r="4" + fill="currentColor" + fill-opacity=".15" + stroke="currentColor" + stroke-width="2" + stroke-linecap="round" + stroke-linejoin="round" + /> + </svg> + Changelog + </span> + </a> + </div> + <div class="mt-10 grid grid-cols-1 gap-y-4 text-sm leading-6 text-base-content/80 sm:grid-cols-2"> + <div> + <a + href="https://elixirforum.com" + class="group -mx-2 -my-0.5 inline-flex items-center gap-3 rounded-lg px-2 py-0.5 hover:bg-base-200 hover:text-base-content" + > + <svg + viewBox="0 0 16 16" + aria-hidden="true" + class="h-4 w-4 fill-base-content/40 
group-hover:fill-base-content" + > + <path d="M8 13.833c3.866 0 7-2.873 7-6.416C15 3.873 11.866 1 8 1S1 3.873 1 7.417c0 1.081.292 2.1.808 2.995.606 1.05.806 2.399.086 3.375l-.208.283c-.285.386-.01.905.465.85.852-.098 2.048-.318 3.137-.81a3.717 3.717 0 0 1 1.91-.318c.263.027.53.041.802.041Z" /> + </svg> + Discuss on the Elixir Forum + </a> + </div> + <div> + <a + href="https://discord.gg/elixir" + class="group -mx-2 -my-0.5 inline-flex items-center gap-3 rounded-lg px-2 py-0.5 hover:bg-base-200 hover:text-base-content" + > + <svg + viewBox="0 0 16 16" + aria-hidden="true" + class="h-4 w-4 fill-base-content/40 group-hover:fill-base-content" + > + <path d="M13.545 2.995c-1.02-.46-2.114-.8-3.257-.994a.05.05 0 0 0-.052.024c-.141.246-.297.567-.406.82a12.377 12.377 0 0 0-3.658 0 8.238 8.238 0 0 0-.412-.82.052.052 0 0 0-.052-.024 13.315 13.315 0 0 0-3.257.994.046.046 0 0 0-.021.018C.356 6.063-.213 9.036.066 11.973c.001.015.01.029.02.038a13.353 13.353 0 0 0 3.996 1.987.052.052 0 0 0 .056-.018c.308-.414.582-.85.818-1.309a.05.05 0 0 0-.028-.069 8.808 8.808 0 0 1-1.248-.585.05.05 0 0 1-.005-.084c.084-.062.168-.126.248-.191a.05.05 0 0 1 .051-.007c2.619 1.176 5.454 1.176 8.041 0a.05.05 0 0 1 .053.006c.08.065.164.13.248.192a.05.05 0 0 1-.004.084c-.399.23-.813.423-1.249.585a.05.05 0 0 0-.027.07c.24.457.514.893.817 1.307a.051.051 0 0 0 .056.019 13.31 13.31 0 0 0 4.001-1.987.05.05 0 0 0 .021-.037c.334-3.396-.559-6.345-2.365-8.96a.04.04 0 0 0-.021-.02Zm-8.198 7.19c-.789 0-1.438-.712-1.438-1.587 0-.874.637-1.586 1.438-1.586.807 0 1.45.718 1.438 1.586 0 .875-.637 1.587-1.438 1.587Zm5.316 0c-.788 0-1.438-.712-1.438-1.587 0-.874.637-1.586 1.438-1.586.807 0 1.45.718 1.438 1.586 0 .875-.63 1.587-1.438 1.587Z" /> + </svg> + Join our Discord server + </a> + </div> + <div> + <a + href="https://elixir-slack.community/" + class="group -mx-2 -my-0.5 inline-flex items-center gap-3 rounded-lg px-2 py-0.5 hover:bg-base-200 hover:text-base-content" + > + <svg + viewBox="0 0 16 16" + aria-hidden="true" + class="h-4 w-4 fill-base-content/40 group-hover:fill-base-content" + > + <path d="M3.361 10.11a1.68 1.68 0 1 1-1.68-1.681h1.68v1.682ZM4.209 10.11a1.68 1.68 0 1 1 3.361 0v4.21a1.68 1.68 0 1 1-3.361 0v-4.21ZM5.89 3.361a1.68 1.68 0 1 1 1.681-1.68v1.68H5.89ZM5.89 4.209a1.68 1.68 0 1 1 0 3.361H1.68a1.68 1.68 0 1 1 0-3.361h4.21ZM12.639 5.89a1.68 1.68 0 1 1 1.68 1.681h-1.68V5.89ZM11.791 5.89a1.68 1.68 0 1 1-3.361 0V1.68a1.68 1.68 0 0 1 3.361 0v4.21ZM10.11 12.639a1.68 1.68 0 1 1-1.681 1.68v-1.68h1.682ZM10.11 11.791a1.68 1.68 0 1 1 0-3.361h4.21a1.68 1.68 0 1 1 0 3.361h-4.21Z" /> + </svg> + Join us on Slack + </a> + </div> + <div> + <a + href="https://fly.io/docs/elixir/getting-started/" + class="group -mx-2 -my-0.5 inline-flex items-center gap-3 rounded-lg px-2 py-0.5 hover:bg-base-200 hover:text-base-content" + > + <svg + viewBox="0 0 20 20" + aria-hidden="true" + class="h-4 w-4 fill-base-content/40 group-hover:fill-base-content" + > + <path d="M1 12.5A4.5 4.5 0 005.5 17H15a4 4 0 001.866-7.539 3.504 3.504 0 00-4.504-4.272A4.5 4.5 0 004.06 8.235 4.502 4.502 0 001 12.5z" /> + </svg> + Deploy your application + </a> + </div> + </div> + </div> + </div> + </div> +</div> diff --git a/web-ng/lib/serviceradar_web_ng_web/controllers/user_session_controller.ex b/web-ng/lib/serviceradar_web_ng_web/controllers/user_session_controller.ex new file mode 100644 index 000000000..a14cca568 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/controllers/user_session_controller.ex @@ -0,0 +1,67 @@ +defmodule 
ServiceRadarWebNGWeb.UserSessionController do + use ServiceRadarWebNGWeb, :controller + + alias ServiceRadarWebNG.Accounts + alias ServiceRadarWebNGWeb.UserAuth + + def create(conn, %{"_action" => "confirmed"} = params) do + create(conn, params, "User confirmed successfully.") + end + + def create(conn, params) do + create(conn, params, "Welcome back!") + end + + # magic link login + defp create(conn, %{"user" => %{"token" => token} = user_params}, info) do + case Accounts.login_user_by_magic_link(token) do + {:ok, {user, tokens_to_disconnect}} -> + UserAuth.disconnect_sessions(tokens_to_disconnect) + + conn + |> put_flash(:info, info) + |> UserAuth.log_in_user(user, user_params) + + _ -> + conn + |> put_flash(:error, "The link is invalid or it has expired.") + |> redirect(to: ~p"/users/log-in") + end + end + + # email + password login + defp create(conn, %{"user" => user_params}, info) do + %{"email" => email, "password" => password} = user_params + + if user = Accounts.get_user_by_email_and_password(email, password) do + conn + |> put_flash(:info, info) + |> UserAuth.log_in_user(user, user_params) + else + # In order to prevent user enumeration attacks, don't disclose whether the email is registered. + conn + |> put_flash(:error, "Invalid email or password") + |> put_flash(:email, String.slice(email, 0, 160)) + |> redirect(to: ~p"/users/log-in") + end + end + + def update_password(conn, %{"user" => user_params} = params) do + user = conn.assigns.current_scope.user + true = Accounts.sudo_mode?(user) + {:ok, {_user, expired_tokens}} = Accounts.update_user_password(user, user_params) + + # disconnect all existing LiveViews with old sessions + UserAuth.disconnect_sessions(expired_tokens) + + conn + |> put_session(:user_return_to, ~p"/users/settings") + |> create(params, "Password updated successfully!") + end + + def delete(conn, _params) do + conn + |> put_flash(:info, "Logged out successfully.") + |> UserAuth.log_out_user() + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/engine.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/engine.ex new file mode 100644 index 000000000..18bc0c408 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/engine.ex @@ -0,0 +1,90 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Engine do + @moduledoc false + + alias ServiceRadarWebNGWeb.Dashboard.Registry + + @type srql_response :: map() + + @type panel :: %{ + id: String.t(), + plugin: module(), + title: String.t(), + assigns: map() + } + + def build_panels(%{} = srql_response) do + plugins = Registry.plugins() + + {table_plugins, other_plugins} = + Enum.split_with(plugins, fn plugin -> + plugin == ServiceRadarWebNGWeb.Dashboard.Plugins.Table + end) + + other_panels = + other_plugins + |> Enum.filter(fn plugin -> + Code.ensure_loaded?(plugin) and plugin.supports?(srql_response) + end) + |> Enum.map(&plugin_panel(&1, srql_response)) + |> Enum.reject(&is_nil/1) + + table_panel = + case table_plugins do + [table_plugin | _] -> plugin_panel(table_plugin, srql_response) + _ -> nil + end + + cond do + other_panels == [] -> + case table_panel do + %{} -> [table_panel] + _ -> [fallback_panel(srql_response)] + end + + is_map(table_panel) -> + other_panels ++ [table_panel] + + true -> + other_panels + end + end + + def build_panels(_), do: [fallback_panel(%{"results" => []})] + + defp plugin_panel(plugin, srql_response) do + base = %{ + id: plugin_id(plugin), + plugin: plugin, + title: plugin_title(plugin), + assigns: %{} + } + + case plugin.build(srql_response) do + {:ok, assigns} when 
is_map(assigns) -> + %{base | assigns: assigns} + + _ -> + nil + end + end + + defp fallback_panel(srql_response) do + plugin_panel(ServiceRadarWebNGWeb.Dashboard.Plugins.Table, srql_response) + end + + defp plugin_id(plugin) do + if function_exported?(plugin, :id, 0) do + plugin.id() + else + plugin |> Module.split() |> List.last() |> to_string() + end + end + + defp plugin_title(plugin) do + if function_exported?(plugin, :title, 0) do + plugin.title() + else + plugin |> Module.split() |> List.last() |> to_string() + end + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/plugin.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugin.ex new file mode 100644 index 000000000..ab4218fc5 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugin.ex @@ -0,0 +1,10 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Plugin do + @moduledoc false + + @type srql_response :: map() + + @callback id() :: String.t() + @callback title() :: String.t() + @callback supports?(srql_response()) :: boolean() + @callback build(srql_response()) :: {:ok, map()} | {:error, term()} +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/categories.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/categories.ex new file mode 100644 index 000000000..1cca1c985 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/categories.ex @@ -0,0 +1,53 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Plugins.Categories do + @moduledoc false + + use Phoenix.LiveComponent + + @behaviour ServiceRadarWebNGWeb.Dashboard.Plugin + + alias ServiceRadarWebNGWeb.SRQL.Viz + + import ServiceRadarWebNGWeb.SRQLComponents, only: [srql_auto_viz: 1] + + @impl true + def id, do: "categories" + + @impl true + def title, do: "Categories" + + @impl true + def supports?(%{"results" => results}) when is_list(results) do + match?({:categories, _}, Viz.infer(results)) + end + + def supports?(_), do: false + + @impl true + def build(%{"results" => results}) when is_list(results) do + case Viz.infer(results) do + {:categories, _} = viz -> {:ok, %{viz: viz}} + _ -> {:error, :not_categories} + end + end + + def build(_), do: {:error, :invalid_response} + + @impl true + def update(%{panel_assigns: panel_assigns} = assigns, socket) do + socket = + socket + |> assign(Map.drop(assigns, [:panel_assigns])) + |> assign(panel_assigns || %{}) + + {:ok, socket} + end + + @impl true + def render(assigns) do + ~H""" + <div id={"panel-#{@id}"}> + <.srql_auto_viz viz={@viz} /> + </div> + """ + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/graph_result.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/graph_result.ex new file mode 100644 index 000000000..2b1bcba66 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/graph_result.ex @@ -0,0 +1,106 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Plugins.GraphResult do + @moduledoc false + + use Phoenix.LiveComponent + + @behaviour ServiceRadarWebNGWeb.Dashboard.Plugin + + import ServiceRadarWebNGWeb.UIComponents, only: [ui_panel: 1] + + @max_preview 20 + + @impl true + def id, do: "graph_result" + + @impl true + def title, do: "Graph" + + @impl true + def supports?(%{"viz" => %{"columns" => columns}, "results" => results}) + when is_list(columns) and is_list(results) do + graphish_viz?(columns) or graphish_results?(results) + end + + def supports?(_), do: false + + @impl true + def build(%{"results" => results} = _srql_response) when is_list(results) do + {:ok, + %{ + max_preview: @max_preview, + items: 
Enum.take(results, @max_preview), + summary: summarize(results) + }} + end + + def build(_), do: {:error, :invalid_response} + + defp graphish_viz?(columns) do + Enum.any?(columns, fn + %{"name" => "result", "type" => "jsonb"} -> true + %{"name" => "result"} -> true + _ -> false + end) + end + + defp graphish_results?(results) do + Enum.any?(results, fn + %{"nodes" => _nodes, "edges" => _edges} -> true + %{"vertices" => _v, "edges" => _e} -> true + %{"result" => %{} = _} -> true + _ -> false + end) + end + + defp summarize(results) do + Enum.reduce(results, %{nodes: 0, edges: 0}, fn item, acc -> + case item do + %{"nodes" => nodes, "edges" => edges} when is_list(nodes) and is_list(edges) -> + %{acc | nodes: acc.nodes + length(nodes), edges: acc.edges + length(edges)} + + %{"vertices" => nodes, "edges" => edges} when is_list(nodes) and is_list(edges) -> + %{acc | nodes: acc.nodes + length(nodes), edges: acc.edges + length(edges)} + + _ -> + acc + end + end) + end + + @impl true + def update(%{panel_assigns: panel_assigns} = assigns, socket) do + socket = + socket + |> assign(Map.drop(assigns, [:panel_assigns])) + |> assign(panel_assigns || %{}) + + {:ok, socket} + end + + @impl true + def render(assigns) do + ~H""" + <div id={"panel-#{@id}"}> + <.ui_panel> + <:header> + <div class="min-w-0"> + <div class="text-sm font-semibold">Graph</div> + <div class="text-xs text-base-content/70"> + Nodes: <span class="font-mono">{@summary.nodes}</span> + <span class="opacity-60">·</span> Edges: <span class="font-mono">{@summary.edges}</span> + </div> + </div> + </:header> + + <div class="text-xs text-base-content/70 mb-3"> + Preview of the first {@max_preview} graph result rows. + </div> + + <div class="rounded-xl border border-base-200 bg-base-100 p-3 overflow-x-auto"> + <pre class="text-xs leading-relaxed"><%= Jason.encode!(@items, pretty: true) %></pre> + </div> + </.ui_panel> + </div> + """ + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/table.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/table.ex new file mode 100644 index 000000000..98cdc7d6f --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/table.ex @@ -0,0 +1,63 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Plugins.Table do + @moduledoc false + + use Phoenix.LiveComponent + + @behaviour ServiceRadarWebNGWeb.Dashboard.Plugin + + import ServiceRadarWebNGWeb.SRQLComponents, only: [srql_results_table: 1] + import ServiceRadarWebNGWeb.UIComponents, only: [ui_panel: 1] + + @impl true + def id, do: "table" + + @impl true + def title, do: "Table" + + @impl true + def supports?(_srql_response), do: true + + @impl true + def build(%{} = srql_response) do + results = + srql_response + |> Map.get("results", []) + |> normalize_results() + + {:ok, %{results: results}} + end + + @impl true + def update(%{panel_assigns: panel_assigns} = assigns, socket) do + socket = + socket + |> assign(Map.drop(assigns, [:panel_assigns])) + |> assign(panel_assigns || %{}) + + {:ok, socket} + end + + defp normalize_results(results) when is_list(results) do + Enum.map(results, fn + %{} = row -> row + value -> %{"value" => value} + end) + end + + defp normalize_results(_), do: [] + + @impl true + def render(assigns) do + ~H""" + <div id={"panel-#{@id}"}> + <.ui_panel> + <:header> + <div class="text-sm font-semibold">Table</div> + </:header> + + <.srql_results_table id={"panel-#{@id}-table"} rows={@results} empty_message="No results." 
/> + </.ui_panel> + </div> + """ + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/timeseries.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/timeseries.ex new file mode 100644 index 000000000..e7693010d --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/timeseries.ex @@ -0,0 +1,457 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Plugins.Timeseries do + @moduledoc false + + use Phoenix.LiveComponent + + @behaviour ServiceRadarWebNGWeb.Dashboard.Plugin + + import ServiceRadarWebNGWeb.UIComponents, only: [ui_panel: 1] + + @max_series 6 + @max_points 200 + @chart_width 800 + @chart_height 140 + @chart_pad 8 + + @impl true + def id, do: "timeseries" + + @impl true + def title, do: "Timeseries" + + @impl true + def supports?(%{"viz" => %{"suggestions" => suggestions}}) when is_list(suggestions) do + Enum.any?(suggestions, fn + %{"kind" => "timeseries"} -> true + _ -> false + end) + end + + def supports?(_), do: false + + @impl true + def build(%{"results" => results, "viz" => viz} = _srql_response) + when is_list(results) and is_map(viz) do + with {:ok, spec} <- parse_timeseries_spec(viz), + {:ok, series_points} <- extract_series_points(results, spec) do + {:ok, %{spec: spec, series_points: series_points}} + end + end + + def build(_), do: {:error, :invalid_response} + + defp parse_timeseries_spec(%{"suggestions" => suggestions}) when is_list(suggestions) do + suggestion = + Enum.find(suggestions, fn + %{"kind" => "timeseries"} -> true + _ -> false + end) + + case suggestion do + %{"x" => x, "y" => y, "series" => series} + when is_binary(x) and is_binary(y) and is_binary(series) -> + {:ok, %{x: x, y: y, series: series}} + + %{"x" => x, "y" => y} when is_binary(x) and is_binary(y) -> + {:ok, %{x: x, y: y, series: nil}} + + _ -> + {:error, :missing_timeseries_suggestion} + end + end + + defp parse_timeseries_spec(_), do: {:error, :missing_suggestions} + + defp extract_series_points(results, %{x: x, y: y, series: series_key}) do + rows = + results + |> Enum.filter(&is_map/1) + |> Enum.take(@max_points) + + points = + Enum.reduce(rows, %{}, fn row, acc -> + series = + if is_binary(series_key) do + row + |> Map.get(series_key) + |> safe_to_string() + |> String.trim() + |> normalize_series_label() + else + "series" + end + + with {:ok, dt} <- parse_datetime(Map.get(row, x)), + {:ok, value} <- parse_number(Map.get(row, y)) do + Map.update(acc, series, [{dt, value}], fn existing -> existing ++ [{dt, value}] end) + else + _ -> acc + end + end) + + series_points = + points + |> Enum.sort_by(fn {series, _points} -> series end) + |> Enum.take(@max_series) + + {:ok, series_points} + end + + defp parse_number(value) when is_integer(value), do: {:ok, value * 1.0} + defp parse_number(value) when is_float(value), do: {:ok, value} + + defp parse_number(value) when is_binary(value) do + value = String.trim(value) + + cond do + value == "" -> + {:error, :empty} + + match?({_, ""}, Float.parse(value)) -> + {v, ""} = Float.parse(value) + {:ok, v} + + match?({_, ""}, Integer.parse(value)) -> + {v, ""} = Integer.parse(value) + {:ok, v * 1.0} + + true -> + {:error, :nan} + end + end + + defp parse_number(_), do: {:error, :not_numeric} + + defp parse_datetime(%DateTime{} = dt), do: {:ok, dt} + + defp parse_datetime(%NaiveDateTime{} = ndt) do + {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + end + + defp parse_datetime(value) when is_binary(value) do + value = String.trim(value) + + with {:error, _} <- DateTime.from_iso8601(value), + {:ok, ndt} <- 
NaiveDateTime.from_iso8601(value) do + {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + else + {:ok, dt, _offset} -> {:ok, dt} + {:error, _} -> {:error, :invalid_datetime} + end + end + + defp parse_datetime(_), do: {:error, :not_datetime} + + defp safe_to_string(nil), do: "" + defp safe_to_string(value) when is_binary(value), do: value + defp safe_to_string(value) when is_integer(value), do: Integer.to_string(value) + defp safe_to_string(value) when is_atom(value), do: Atom.to_string(value) + defp safe_to_string(value), do: inspect(value) + + defp normalize_series_label(""), do: "overall" + defp normalize_series_label(nil), do: "overall" + defp normalize_series_label(value), do: value + + defp chart_paths(points) when is_list(points) do + values = Enum.map(points, fn {_dt, v} -> v end) + + case values do + [] -> + %{line: "", area: "", min: 0.0, max: 0.0, latest: nil} + + _ -> + min_v = Enum.min(values, fn -> 0 end) + max_v = Enum.max(values, fn -> 0 end) + latest = List.last(values) + + coords = + Enum.with_index(values) + |> Enum.map(fn {v, idx} -> + x = idx_to_x(idx, length(values)) + y = value_to_y(v, min_v, max_v) + {x, y} + end) + + line = + coords + |> Enum.map(fn {x, y} -> "#{x},#{y}" end) + |> Enum.join(" ") + + area = + case coords do + [] -> + "" + + [{first_x, _} | _] -> + {last_x, _} = List.last(coords) + + path = + coords + |> Enum.map(fn {x, y} -> "#{x},#{y}" end) + |> Enum.join(" L ") + + "M #{first_x},#{baseline_y()} L " <> + path <> + " L #{last_x},#{baseline_y()} Z" + end + + %{line: line, area: area, min: min_v, max: max_v, latest: latest} + end + end + + defp value_to_y(_v, min_v, max_v) when min_v == max_v, do: round(@chart_height / 2) + + defp value_to_y(v, min_v, max_v) do + usable = @chart_height - @chart_pad * 2 + scaled = (v - min_v) / (max_v - min_v) + round(@chart_height - @chart_pad - scaled * usable) + end + + defp baseline_y, do: @chart_height - @chart_pad + + defp idx_to_x(_idx, 0), do: @chart_pad + defp idx_to_x(0, _len), do: @chart_pad + + defp idx_to_x(idx, len) when len > 1 do + usable = @chart_width - @chart_pad * 2 + round(@chart_pad + idx / (len - 1) * usable) + end + + defp series_color(index) do + # Dracula theme inspired colors + colors = [ + {"#50fa7b", "rgba(80,250,123,0.25)"}, + {"#8be9fd", "rgba(139,233,253,0.25)"}, + {"#bd93f9", "rgba(189,147,249,0.25)"}, + {"#ff79c6", "rgba(255,121,198,0.25)"}, + {"#ffb86c", "rgba(255,184,108,0.25)"}, + {"#f1fa8c", "rgba(241,250,140,0.25)"} + ] + + Enum.at(colors, rem(index, length(colors))) + end + + defp dt_label(%DateTime{} = dt), do: Calendar.strftime(dt, "%b %-d %H:%M") + defp dt_label(_), do: "" + + defp format_value(v) when is_float(v), do: :erlang.float_to_binary(v, decimals: 2) + defp format_value(v) when is_integer(v), do: Integer.to_string(v) + defp format_value(_), do: "—" + + @impl true + def update(%{panel_assigns: panel_assigns} = assigns, socket) do + compact = Map.get(panel_assigns || %{}, :compact, false) + + socket = + socket + |> assign(Map.drop(assigns, [:panel_assigns])) + |> assign(panel_assigns || %{}) + |> assign(:compact, compact) + |> assign(:chart_width, @chart_width) + |> assign(:chart_height, @chart_height) + |> assign(:chart_pad, @chart_pad) + + {:ok, socket} + end + + @impl true + def render(assigns) do + compact = Map.get(assigns, :compact, false) + series_points = assigns.series_points || [] + + # Pre-compute chart data for each series for hover functionality + series_data = + Enum.with_index(series_points) + |> Enum.map(fn {{series, points}, idx} -> + paths = 
chart_paths(points) + {stroke, _fill} = series_color(idx) + point_data = Enum.map(points, fn {dt, v} -> %{dt: dt_label(dt), v: v} end) + %{series: series, paths: paths, stroke: stroke, idx: idx, point_data: point_data} + end) + + assigns = + assigns + |> assign(:compact, compact) + |> assign(:series_count, length(series_points)) + |> assign(:series_data, series_data) + |> assign(:first_dt, first_dt(series_points)) + |> assign(:last_dt, last_dt(series_points)) + + if compact do + render_compact(assigns) + else + render_full(assigns) + end + end + + defp render_compact(assigns) do + ~H""" + <div id={"panel-#{@id}"} class="p-4"> + <div class={[ + "grid gap-3", + @series_count > 1 && "grid-cols-1 lg:grid-cols-2 xl:grid-cols-3", + @series_count == 1 && "grid-cols-1" + ]}> + <%= for data <- @series_data do %> + <.chart_card + id={@id} + data={data} + chart_width={@chart_width} + chart_height={@chart_height} + chart_pad={@chart_pad} + compact={true} + /> + <% end %> + </div> + </div> + """ + end + + defp render_full(assigns) do + ~H""" + <div id={"panel-#{@id}"}> + <.ui_panel> + <:header> + <div class="min-w-0"> + <div class="text-sm font-semibold">{@title || "Timeseries"}</div> + </div> + <div class="text-xs text-base-content/50 font-mono"> + <span :if={is_struct(@first_dt, DateTime)}>{dt_label(@first_dt)}</span> + <span class="px-1">→</span> + <span :if={is_struct(@last_dt, DateTime)}>{dt_label(@last_dt)}</span> + </div> + </:header> + + <div class={[ + "grid gap-4", + @series_count > 1 && "grid-cols-1 md:grid-cols-2", + @series_count <= 1 && "grid-cols-1" + ]}> + <%= for data <- @series_data do %> + <.chart_card + id={@id} + data={data} + chart_width={@chart_width} + chart_height={@chart_height} + chart_pad={@chart_pad} + compact={false} + /> + <% end %> + </div> + </.ui_panel> + </div> + """ + end + + attr :id, :string, required: true + attr :data, :map, required: true + attr :chart_width, :integer, required: true + attr :chart_height, :integer, required: true + attr :chart_pad, :integer, required: true + attr :compact, :boolean, default: false + + defp chart_card(assigns) do + ~H""" + <div + id={"chart-#{@id}-#{@data.idx}"} + class={[ + "rounded-lg border border-base-200 bg-base-100 relative group", + @compact && "p-3", + not @compact && "p-4" + ]} + phx-hook="TimeseriesChart" + data-points={Jason.encode!(@data.point_data)} + > + <div class="flex items-center justify-between gap-3 mb-2"> + <div class="flex items-center gap-2 min-w-0"> + <span + class="inline-block size-2 rounded-full shrink-0" + style={"background-color: #{@data.stroke}"} + /> + <span class={["font-medium truncate", @compact && "text-xs", not @compact && "text-sm"]}> + {@data.series} + </span> + </div> + <div class={[ + "text-base-content/60 font-mono shrink-0", + @compact && "text-[10px]", + not @compact && "text-xs" + ]}> + <span style={"color: #{@data.stroke}"}>{format_value(@data.paths.latest)}</span> + </div> + </div> + + <div class="relative"> + <svg + viewBox={"0 0 #{@chart_width} #{@chart_height}"} + class={["w-full", @compact && "h-24", not @compact && "h-32"]} + preserveAspectRatio="none" + > + <defs> + <linearGradient id={"series-fill-#{@id}-#{@data.idx}"} x1="0" y1="0" x2="0" y2="1"> + <stop offset="0%" stop-color={@data.stroke} stop-opacity="0.3" /> + <stop offset="100%" stop-color={@data.stroke} stop-opacity="0.05" /> + </linearGradient> + </defs> + + <path d={@data.paths.area} fill={"url(#series-fill-#{@id}-#{@data.idx})"} /> + <polyline + fill="none" + stroke={@data.stroke} + stroke-width="2" + 
stroke-linecap="round" + stroke-linejoin="round" + points={@data.paths.line} + /> + </svg> + + <!-- Hover tooltip - populated by JS --> + <div + class="absolute hidden pointer-events-none bg-base-300 text-base-content text-xs px-2 py-1 rounded shadow-lg z-10 font-mono whitespace-nowrap" + data-tooltip + > + </div> + <!-- Hover line --> + <div + class="absolute hidden pointer-events-none w-px bg-base-content/30 top-0 bottom-0" + data-hover-line + > + </div> + </div> + + <div class={[ + "flex items-center justify-between text-base-content/50 mt-1", + @compact && "text-[10px]", + not @compact && "text-xs" + ]}> + <span>min: <span class="font-mono">{format_value(@data.paths.min)}</span></span> + <span>max: <span class="font-mono">{format_value(@data.paths.max)}</span></span> + </div> + </div> + """ + end + + defp first_dt(series_points) when is_list(series_points) do + series_points + |> Enum.find_value(fn {_series, points} -> + case points do + [{%DateTime{} = dt, _} | _] -> dt + _ -> nil + end + end) + end + + defp first_dt(_), do: nil + + defp last_dt(series_points) when is_list(series_points) do + series_points + |> Enum.find_value(fn {_series, points} -> + case List.last(points) do + {%DateTime{} = dt, _} -> dt + _ -> nil + end + end) + end + + defp last_dt(_), do: nil +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/topology.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/topology.ex new file mode 100644 index 000000000..bd95ed82c --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/plugins/topology.ex @@ -0,0 +1,372 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Plugins.Topology do + @moduledoc false + + use Phoenix.LiveComponent + + @behaviour ServiceRadarWebNGWeb.Dashboard.Plugin + + import ServiceRadarWebNGWeb.UIComponents, only: [ui_panel: 1] + + @max_nodes 120 + @max_edges 240 + + @impl true + def id, do: "topology" + + @impl true + def title, do: "Topology" + + @impl true + def supports?(%{"results" => results}) when is_list(results) do + Enum.any?(results, fn item -> + case unwrap_payload(item) do + %{"nodes" => nodes, "edges" => edges} when is_list(nodes) and is_list(edges) -> true + %{"vertices" => nodes, "edges" => edges} when is_list(nodes) and is_list(edges) -> true + _ -> false + end + end) + end + + def supports?(_), do: false + + @impl true + def build(%{"results" => results} = _srql_response) when is_list(results) do + payloads = + results + |> Enum.map(&unwrap_payload/1) + |> Enum.filter(&is_map/1) + + with {:ok, graph} <- merge_graph_payloads(payloads) do + {:ok, graph} + end + end + + def build(_), do: {:error, :invalid_response} + + @impl true + def update(%{panel_assigns: panel_assigns} = assigns, socket) do + socket = + socket + |> assign(Map.drop(assigns, [:panel_assigns])) + |> assign(panel_assigns || %{}) + |> assign_new(:selected_node_id, fn -> nil end) + + {:ok, socket} + end + + @impl true + def handle_event("select_node", %{"id" => node_id}, socket) do + node_id = node_id |> to_string() |> String.trim() + {:noreply, assign(socket, :selected_node_id, if(node_id == "", do: nil, else: node_id))} + end + + def handle_event("clear_selection", _params, socket) do + {:noreply, assign(socket, :selected_node_id, nil)} + end + + @impl true + def render(assigns) do + assigns = + assigns + |> assign_new(:nodes, fn -> [] end) + |> assign_new(:edges, fn -> [] end) + |> assign_new(:selected_node_id, fn -> nil end) + |> assign(:layout, layout(assigns.nodes)) + |> assign(:selected_node, find_node(assigns.nodes, 
assigns.selected_node_id)) + + ~H""" + <div id={"panel-#{@id}"}> + <.ui_panel> + <:header> + <div class="min-w-0"> + <div class="text-sm font-semibold">Topology</div> + <div class="text-xs text-base-content/70"> + Nodes: <span class="font-mono">{length(@nodes)}</span> + <span class="opacity-60">·</span> + Edges: <span class="font-mono">{length(@edges)}</span> + <span :if={@selected_node_id} class="opacity-60"> + · Selected: <span class="font-mono">{@selected_node_id}</span> + </span> + </div> + </div> + + <div class="shrink-0 flex items-center gap-2"> + <button + :if={@selected_node_id} + type="button" + class="btn btn-ghost btn-sm" + phx-click="clear_selection" + phx-target={@myself} + > + Clear + </button> + </div> + </:header> + + <div :if={@nodes == []} class="text-sm text-base-content/70"> + No graph results detected. Return a JSON object with <span class="font-mono">nodes</span> + and <span class="font-mono">edges</span>. + </div> + + <div :if={@nodes != []} class="grid grid-cols-1 lg:grid-cols-3 gap-4"> + <div class="lg:col-span-2 rounded-xl border border-base-200 bg-base-100 overflow-hidden"> + <svg viewBox="0 0 1000 600" class="w-full h-[420px] bg-base-100"> + <defs> + <marker + id="arrow" + markerWidth="10" + markerHeight="10" + refX="8" + refY="3" + orient="auto" + > + <path d="M0,0 L0,6 L9,3 z" class="fill-base-content/40" /> + </marker> + </defs> + + <%= for edge <- @edges do %> + <% src = Map.get(@layout, edge.source) %> + <% dst = Map.get(@layout, edge.target) %> + <% selected? = + @selected_node_id && + (edge.source == @selected_node_id || edge.target == @selected_node_id) %> + + <line + :if={src && dst} + x1={src.x} + y1={src.y} + x2={dst.x} + y2={dst.y} + stroke-width={if selected?, do: 2.5, else: 1.5} + class={ + if selected?, + do: "stroke-primary/80", + else: "stroke-base-content/25" + } + marker-end="url(#arrow)" + /> + <% end %> + + <%= for node <- @nodes do %> + <% pos = Map.get(@layout, node.id) %> + <% selected? = @selected_node_id == node.id %> + + <g + :if={pos} + class="cursor-pointer" + phx-click="select_node" + phx-target={@myself} + phx-value-id={node.id} + > + <circle + cx={pos.x} + cy={pos.y} + r={if selected?, do: 14, else: 11} + class={ + if selected?, + do: "fill-primary stroke-primary/40", + else: "fill-base-200 stroke-base-300" + } + stroke-width="2" + /> + <text + x={pos.x + 16} + y={pos.y + 4} + class={ + if selected?, + do: "fill-base-content text-xs font-semibold", + else: "fill-base-content/80 text-xs" + } + > + {node.label} + </text> + </g> + <% end %> + </svg> + </div> + + <div class="rounded-xl border border-base-200 bg-base-100 p-4"> + <div class="text-xs font-semibold mb-2">Selected Node</div> + + <div :if={is_nil(@selected_node)} class="text-sm text-base-content/70"> + Click a node to inspect details. 
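+            <%!-- Node selection is handled by this component's "select_node" event;
+                 the selected row's raw map is rendered below as pretty-printed JSON. --%>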
+ </div> + + <div :if={not is_nil(@selected_node)} class="flex flex-col gap-3"> + <div class="text-sm font-semibold truncate">{@selected_node.label}</div> + <div class="text-xs text-base-content/60"> + <span class="font-mono">{@selected_node.id}</span> + </div> + + <div class="rounded-lg border border-base-200 bg-base-200/30 p-3 overflow-x-auto"> + <pre class="text-xs leading-relaxed"><%= Jason.encode!(@selected_node.raw, pretty: true) %></pre> + </div> + </div> + </div> + </div> + </.ui_panel> + </div> + """ + end + + defp unwrap_payload(%{"result" => value}), do: value + defp unwrap_payload(value), do: value + + defp merge_graph_payloads(payloads) when is_list(payloads) do + {nodes, edges} = + Enum.reduce(payloads, {[], []}, fn payload, {nodes_acc, edges_acc} -> + {nodes, edges} = graph_parts(payload) + {nodes_acc ++ nodes, edges_acc ++ edges} + end) + + nodes = + nodes + |> Enum.filter(&is_map/1) + |> Enum.map(&normalize_node/1) + |> Enum.reject(&is_nil/1) + |> Enum.uniq_by(& &1.id) + |> Enum.take(@max_nodes) + + node_ids = MapSet.new(Enum.map(nodes, & &1.id)) + + edges = + edges + |> Enum.filter(&is_map/1) + |> Enum.map(&normalize_edge/1) + |> Enum.reject(&is_nil/1) + |> Enum.filter(fn e -> + MapSet.member?(node_ids, e.source) and MapSet.member?(node_ids, e.target) + end) + |> Enum.uniq_by(fn e -> {e.source, e.target, e.label} end) + |> Enum.take(@max_edges) + + {:ok, %{nodes: nodes, edges: edges}} + end + + defp merge_graph_payloads(_), do: {:error, :invalid_payloads} + + defp graph_parts(%{"nodes" => nodes, "edges" => edges}) when is_list(nodes) and is_list(edges), + do: {nodes, edges} + + defp graph_parts(%{"vertices" => nodes, "edges" => edges}) + when is_list(nodes) and is_list(edges), + do: {nodes, edges} + + defp graph_parts(_), do: {[], []} + + defp normalize_node(%{} = raw) do + id = + first_string(raw, ["id", "device_id", "poller_id", "agent_id", "name"]) || + fallback_id(raw) + + label = first_string(raw, ["label", "hostname", "name"]) || id + + if is_binary(id) and id != "" do + %{id: id, label: String.slice(label, 0, 80), raw: raw} + else + nil + end + end + + defp normalize_edge(%{} = raw) do + source = first_string(raw, ["source", "from", "src", "start", "start_id", "from_id"]) + target = first_string(raw, ["target", "to", "dst", "end", "end_id", "to_id"]) + label = first_string(raw, ["label", "type", "kind", "name"]) || "" + + if is_binary(source) and is_binary(target) and source != "" and target != "" do + %{source: source, target: target, label: String.slice(label, 0, 60), raw: raw} + else + nil + end + end + + defp layout(nodes) when is_list(nodes) do + nodes = + nodes + |> Enum.filter(&is_map/1) + |> Enum.take(@max_nodes) + + count = length(nodes) + + positions = + cond do + count == 0 -> + [] + + count <= 24 -> + circle_positions(count, 500, 300, 220) + + true -> + grid_positions(count, 120, 120, 980, 560) + end + + nodes + |> Enum.zip(positions) + |> Map.new(fn {%{id: id}, {x, y}} -> {id, %{x: x, y: y}} end) + end + + defp layout(_), do: %{} + + defp circle_positions(count, cx, cy, r) do + Enum.map(0..(count - 1), fn idx -> + theta = 2.0 * :math.pi() * idx / count + x = cx + r * :math.cos(theta) + y = cy + r * :math.sin(theta) + {round(x), round(y)} + end) + end + + defp grid_positions(count, x0, y0, width, height) do + cols = max(1, :math.sqrt(count) |> Float.ceil() |> trunc()) + rows = max(1, Float.ceil(count / cols) |> trunc()) + dx = max(1, div(width - x0, max(cols - 1, 1))) + dy = max(1, div(height - y0, max(rows - 1, 1))) + + Enum.map(0..(count - 1), fn idx 
-> + col = rem(idx, cols) + row = div(idx, cols) + {x0 + col * dx, y0 + row * dy} + end) + end + + defp find_node(_nodes, nil), do: nil + + defp find_node(nodes, id) when is_list(nodes) and is_binary(id) do + Enum.find(nodes, fn + %{id: ^id} -> true + _ -> false + end) + end + + defp find_node(_, _), do: nil + + defp first_string(map, keys) do + Enum.reduce_while(keys, nil, fn key, _acc -> + case Map.get(map, key) do + nil -> + {:cont, nil} + + value when is_binary(value) and value != "" -> + {:halt, value} + + value when is_integer(value) -> + {:halt, Integer.to_string(value)} + + value when is_atom(value) -> + {:halt, Atom.to_string(value)} + + _ -> + {:cont, nil} + end + end) + end + + defp fallback_id(raw) do + raw + |> Jason.encode!() + |> :erlang.phash2() + |> Integer.to_string() + rescue + _ -> "" + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/dashboard/registry.ex b/web-ng/lib/serviceradar_web_ng_web/dashboard/registry.ex new file mode 100644 index 000000000..e1391f0ca --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/dashboard/registry.ex @@ -0,0 +1,15 @@ +defmodule ServiceRadarWebNGWeb.Dashboard.Registry do + @moduledoc false + + alias ServiceRadarWebNGWeb.Dashboard.Plugins + + def plugins do + [ + Plugins.Timeseries, + Plugins.Topology, + Plugins.GraphResult, + Plugins.Categories, + Plugins.Table + ] + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/endpoint.ex b/web-ng/lib/serviceradar_web_ng_web/endpoint.ex new file mode 100644 index 000000000..5211f3518 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/endpoint.ex @@ -0,0 +1,54 @@ +defmodule ServiceRadarWebNGWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :serviceradar_web_ng + + # The session will be stored in the cookie and signed, + # this means its contents can be read but not tampered with. + # Set :encryption_salt if you would also like to encrypt it. + @session_options [ + store: :cookie, + key: "_serviceradar_web_ng_key", + signing_salt: "fttoLWPw", + same_site: "Lax" + ] + + socket "/live", Phoenix.LiveView.Socket, + websocket: [connect_info: [session: @session_options]], + longpoll: [connect_info: [session: @session_options]] + + # Serve at "/" the static files from "priv/static" directory. + # + # When code reloading is disabled (e.g., in production), + # the `gzip` option is enabled to serve compressed + # static files generated by running `phx.digest`. + plug Plug.Static, + at: "/", + from: :serviceradar_web_ng, + gzip: not code_reloading?, + only: ServiceRadarWebNGWeb.static_paths() + + # Code reloading can be explicitly enabled under the + # :code_reloader configuration of your endpoint. + if code_reloading? 
do + socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket + plug Phoenix.LiveReloader + plug Phoenix.CodeReloader + plug Phoenix.Ecto.CheckRepoStatus, otp_app: :serviceradar_web_ng + end + + plug Phoenix.LiveDashboard.RequestLogger, + param_key: "request_logger", + cookie_key: "request_logger" + + plug Plug.RequestId + plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] + + plug ServiceRadarWebNGWeb.Plugs.SafeParsers, + parsers: [:urlencoded, :multipart, :json], + pass: ["*/*"], + json_decoder: Phoenix.json_library() + + plug Plug.MethodOverride + plug Plug.Head + plug Plug.Session, @session_options + plug ServiceRadarWebNGWeb.Router +end diff --git a/web-ng/lib/serviceradar_web_ng_web/gettext.ex b/web-ng/lib/serviceradar_web_ng_web/gettext.ex new file mode 100644 index 000000000..b3ca2d7ba --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/gettext.ex @@ -0,0 +1,25 @@ +defmodule ServiceRadarWebNGWeb.Gettext do + @moduledoc """ + A module providing Internationalization with a gettext-based API. + + By using [Gettext](https://hexdocs.pm/gettext), your module compiles translations + that you can use in your application. To use this Gettext backend module, + call `use Gettext` and pass it as an option: + + use Gettext, backend: ServiceRadarWebNGWeb.Gettext + + # Simple translation + gettext("Here is the string to translate") + + # Plural translation + ngettext("Here is the string to translate", + "Here are the strings to translate", + 3) + + # Domain-based translation + dgettext("errors", "Here is the error message to translate") + + See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage. + """ + use Gettext.Backend, otp_app: :serviceradar_web_ng +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/analytics_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/analytics_live/index.ex new file mode 100644 index 000000000..4a88227cf --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/analytics_live/index.ex @@ -0,0 +1,1906 @@ +defmodule ServiceRadarWebNGWeb.AnalyticsLive.Index do + use ServiceRadarWebNGWeb, :live_view + + import Ecto.Query + alias ServiceRadarWebNG.Repo + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + require Logger + + @default_events_limit 500 + @default_logs_limit 500 + @default_metrics_limit 100 + @refresh_interval_ms :timer.seconds(30) + + @impl true + def mount(_params, _session, socket) do + srql = %{ + enabled: true, + query: "", + draft: "", + loading: false, + builder_available: true, + builder_open: false, + builder_supported: true, + builder_sync: true, + builder: %{}, + page_path: "/analytics" + } + + # Schedule auto-refresh if connected + if connected?(socket), do: schedule_refresh() + + {:ok, + socket + |> assign(:page_title, "Analytics") + |> assign(:srql, srql) + |> assign(:loading, true) + |> assign(:error, nil) + |> assign(:refreshed_at, nil) + |> assign(:stats, %{}) + |> assign(:device_availability, %{}) + |> assign(:events_summary, %{}) + |> assign(:logs_summary, %{}) + |> assign(:observability, %{}) + |> assign(:high_utilization, %{}) + |> assign(:bandwidth, %{})} + end + + @impl true + def handle_params(_params, _uri, socket) do + {:noreply, load_analytics(socket)} + end + + @impl true + def handle_info(:refresh_data, socket) do + schedule_refresh() + {:noreply, load_analytics(socket)} + end + + def handle_info(_msg, socket), do: {:noreply, socket} + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def 
handle_event("srql_submit", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_submit", params, fallback_path: "/analytics")} + end + + def handle_event("srql_builder_toggle", _params, socket) do + entity = get_in(socket.assigns, [:srql, :builder, "entity"]) || "devices" + {:noreply, SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: entity)} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_run", %{}, fallback_path: "/analytics")} + end + + def handle_event("srql_builder_add_filter", params, socket) do + entity = get_in(socket.assigns, [:srql, :builder, "entity"]) || "devices" + {:noreply, SRQLPage.handle_event(socket, "srql_builder_add_filter", params, entity: entity)} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + entity = get_in(socket.assigns, [:srql, :builder, "entity"]) || "devices" + {:noreply, SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, entity: entity)} + end + + def handle_event(_event, _params, socket), do: {:noreply, socket} + + defp schedule_refresh do + Process.send_after(self(), :refresh_data, @refresh_interval_ms) + end + + # Query the continuous aggregation for efficient pre-computed stats + defp get_hourly_metrics_stats do + cutoff = DateTime.add(DateTime.utc_now(), -24, :hour) + + query = + from(s in "otel_metrics_hourly_stats", + where: s.bucket >= ^cutoff, + select: %{ + total_count: sum(s.total_count), + error_count: sum(s.error_count), + slow_count: sum(s.slow_count), + http_4xx_count: sum(s.http_4xx_count), + http_5xx_count: sum(s.http_5xx_count), + grpc_error_count: sum(s.grpc_error_count), + avg_duration_ms: + fragment( + "CASE WHEN SUM(?) > 0 THEN SUM(? * ?) / SUM(?) 
ELSE 0 END", + s.total_count, + s.avg_duration_ms, + s.total_count, + s.total_count + ), + p95_duration_ms: max(s.p95_duration_ms), + max_duration_ms: max(s.max_duration_ms) + } + ) + + case Repo.one(query) do + %{total_count: total} = stats when not is_nil(total) -> + %{ + total: to_int(total), + error: to_int(stats.error_count), + slow: to_int(stats.slow_count), + http_4xx: to_int(stats.http_4xx_count), + http_5xx: to_int(stats.http_5xx_count), + grpc_error: to_int(stats.grpc_error_count), + avg_duration_ms: to_float(stats.avg_duration_ms), + p95_duration_ms: to_float(stats.p95_duration_ms), + max_duration_ms: to_float(stats.max_duration_ms) + } + + _ -> + Logger.debug("Hourly metrics stats not available, falling back to SRQL queries") + nil + end + rescue + error -> + Logger.warning("Failed to query hourly metrics stats: #{inspect(error)}") + nil + end + + defp to_float(nil), do: 0.0 + defp to_float(%Decimal{} = d), do: Decimal.to_float(d) + defp to_float(v) when is_float(v), do: v + defp to_float(v) when is_integer(v), do: v * 1.0 + defp to_float(_), do: 0.0 + + defp load_analytics(socket) do + srql_module = srql_module() + + # Try to get metrics stats from continuous aggregation first (more efficient) + hourly_stats = get_hourly_metrics_stats() + + queries = %{ + devices_total: ~s|in:devices stats:"count() as total"|, + devices_online: ~s|in:devices is_available:true stats:"count() as online"|, + devices_offline: ~s|in:devices is_available:false stats:"count() as offline"|, + # Get unique services by service_name in the last hour (most recent status) + services_list: "in:services time:last_1h sort:timestamp:desc limit:500", + events: "in:events time:last_24h sort:event_timestamp:desc limit:#{@default_events_limit}", + logs_recent: "in:logs time:last_24h sort:timestamp:desc limit:#{@default_logs_limit}", + logs_total: ~s|in:logs time:last_24h stats:"count() as total"|, + logs_fatal: ~s|in:logs time:last_24h severity_text:(fatal,FATAL) stats:"count() as fatal"|, + logs_error: ~s|in:logs time:last_24h severity_text:(error,ERROR) stats:"count() as error"|, + logs_warning: + ~s|in:logs time:last_24h severity_text:(warning,warn,WARNING,WARN) stats:"count() as warning"|, + logs_info: ~s|in:logs time:last_24h severity_text:(info,INFO) stats:"count() as info"|, + logs_debug: + ~s|in:logs time:last_24h severity_text:(debug,trace,DEBUG,TRACE) stats:"count() as debug"|, + trace_stats: + "in:otel_trace_summaries time:last_24h " <> + ~s|stats:"count() as total, sum(if(status_code != 1, 1, 0)) as error_traces, sum(if(duration_ms > 100, 1, 0)) as slow_traces"|, + slow_spans: "in:otel_metrics time:last_24h is_slow:true sort:duration_ms:desc limit:25", + # High utilization - get recent CPU metrics + cpu_metrics: + "in:cpu_metrics time:last_1h sort:timestamp:desc limit:#{@default_metrics_limit}", + # High utilization - get recent Memory metrics + memory_metrics: + "in:memory_metrics time:last_1h sort:timestamp:desc limit:#{@default_metrics_limit}", + # High utilization - get recent Disk metrics + disk_metrics: + "in:disk_metrics time:last_1h sort:timestamp:desc limit:#{@default_metrics_limit}" + # TODO: Re-enable when backend supports rperf_targets entity + # rperf_targets: "in:rperf_targets time:last_1h sort:timestamp:desc limit:50" + } + + # Only add SRQL fallback queries if hourly stats failed + queries = + if is_nil(hourly_stats) do + Map.merge(queries, %{ + metrics_total: ~s|in:otel_metrics time:last_24h stats:"count() as total"|, + metrics_slow: ~s|in:otel_metrics time:last_24h is_slow:true 
stats:"count() as total"|, + metrics_error_http4: + ~s|in:otel_metrics time:last_24h http_status_code:4% stats:"count() as total"|, + metrics_error_http5: + ~s|in:otel_metrics time:last_24h http_status_code:5% stats:"count() as total"|, + metrics_error_grpc: + ~s|in:otel_metrics time:last_24h !grpc_status_code:0 !grpc_status_code:"" stats:"count() as total"| + }) + else + queries + end + + results = + queries + |> Task.async_stream( + fn {key, query} -> {key, srql_module.query(query)} end, + ordered: false, + timeout: 30_000 + ) + |> Enum.reduce(%{}, fn + {:ok, {key, result}}, acc -> Map.put(acc, key, result) + {:exit, reason}, acc -> Map.put(acc, :error, "query task exit: #{inspect(reason)}") + end) + + # Merge in hourly stats if available + results = + if hourly_stats do + Map.put(results, :hourly_stats, hourly_stats) + else + results + end + + {stats, device_availability, events_summary, logs_summary, observability, high_utilization, + bandwidth, error} = + build_assigns(results) + + socket + |> assign(:stats, stats) + |> assign(:device_availability, device_availability) + |> assign(:events_summary, events_summary) + |> assign(:logs_summary, logs_summary) + |> assign(:observability, observability) + |> assign(:high_utilization, high_utilization) + |> assign(:bandwidth, bandwidth) + |> assign(:refreshed_at, DateTime.utc_now()) + |> assign(:error, error) + |> assign(:loading, false) + end + + defp build_assigns(results) do + total_devices = extract_count(results[:devices_total]) + online_devices = extract_count(results[:devices_online]) + offline_devices = extract_count(results[:devices_offline]) + + # Calculate unique services from the services list + services_rows = extract_rows(results[:services_list]) + {unique_services, failing_services} = count_unique_services(services_rows) + + stats = %{ + total_devices: total_devices, + offline_devices: offline_devices, + total_services: unique_services, + failing_services: failing_services + } + + availability_pct = + if total_devices > 0 do + Float.round(online_devices / total_devices * 100, 1) + else + 100.0 + end + + device_availability = %{ + online: online_devices, + offline: offline_devices, + total: total_devices, + availability_pct: availability_pct + } + + events_rows = extract_rows(results[:events]) + events_summary = build_events_summary(events_rows) + + logs_rows = extract_rows(results[:logs_recent]) + + logs_counts = %{ + total: extract_count(results[:logs_total]), + fatal: extract_count(results[:logs_fatal]), + error: extract_count(results[:logs_error]), + warning: extract_count(results[:logs_warning]), + info: extract_count(results[:logs_info]), + debug: extract_count(results[:logs_debug]) + } + + logs_summary = build_logs_summary(logs_rows, logs_counts) + + # Build observability summary - prefer pre-computed hourly stats if available + {metrics_total, metrics_error, metrics_slow, avg_duration} = + case Map.get(results, :hourly_stats) do + %{total: total, error: error, slow: slow, avg_duration_ms: avg_ms} -> + # Use efficient pre-computed stats from continuous aggregation + {total, error, slow, avg_ms} + + _ -> + # Fallback to individual SRQL query results + total = extract_count(results[:metrics_total]) + slow = extract_count(results[:metrics_slow]) + http4 = extract_count(results[:metrics_error_http4]) + http5 = extract_count(results[:metrics_error_http5]) + grpc = extract_count(results[:metrics_error_grpc]) + {total, http4 + http5 + grpc, slow, 0} + end + + trace_stats = extract_map(results[:trace_stats]) + slow_spans_rows = 
extract_rows(results[:slow_spans]) + + observability = + build_observability_summary( + metrics_total, + metrics_error, + metrics_slow, + avg_duration, + trace_stats, + slow_spans_rows + ) + + # Build high utilization summary from CPU, Memory, and Disk metrics + cpu_metrics_rows = extract_rows(results[:cpu_metrics]) + memory_metrics_rows = extract_rows(results[:memory_metrics]) + disk_metrics_rows = extract_rows(results[:disk_metrics]) + + high_utilization = + build_high_utilization_summary(cpu_metrics_rows, memory_metrics_rows, disk_metrics_rows) + + # Bandwidth summary (disabled until rperf_targets entity is supported) + bandwidth = %{ + targets: [], + total_download: 0.0, + total_upload: 0.0, + avg_latency: 0.0, + target_count: 0 + } + + error = + Enum.find_value(results, fn + {:error, reason} -> format_error(reason) + {_key, {:error, reason}} -> format_error(reason) + _ -> nil + end) + + {stats, device_availability, events_summary, logs_summary, observability, high_utilization, + bandwidth, error} + end + + defp extract_rows({:ok, %{"results" => rows}}) when is_list(rows), do: rows + defp extract_rows(_), do: [] + + defp extract_map({:ok, %{"results" => [%{} = row | _]}}), do: row + defp extract_map(_), do: %{} + + defp count_unique_services(rows) when is_list(rows) do + # Group by service_name and get most recent status for each + services_by_name = + rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + service_name = Map.get(row, "service_name") + device_id = Map.get(row, "device_id") + # Use composite key of device_id + service_name to identify unique service instances + key = "#{device_id}:#{service_name}" + + if is_binary(service_name) and service_name != "" do + # Keep most recent entry per service (rows are sorted by timestamp desc) + Map.put_new(acc, key, row) + else + acc + end + end) + + unique_count = map_size(services_by_name) + + failing_count = + services_by_name + |> Map.values() + |> Enum.count(fn row -> + Map.get(row, "available") == false + end) + + {unique_count, failing_count} + end + + defp count_unique_services(_), do: {0, 0} + + defp extract_count({:ok, %{"results" => [value | _]}}) do + case value do + v when is_integer(v) -> + v + + v when is_float(v) -> + trunc(v) + + v when is_binary(v) -> + case Integer.parse(String.trim(v)) do + {parsed, ""} -> parsed + _ -> 0 + end + + %{} = row -> + row + |> Map.values() + |> Enum.find(fn v -> is_integer(v) or is_float(v) or (is_binary(v) and v != "") end) + |> case do + v when is_integer(v) -> + v + + v when is_float(v) -> + trunc(v) + + v when is_binary(v) -> + case Integer.parse(String.trim(v)) do + {parsed, ""} -> parsed + _ -> 0 + end + + _ -> + 0 + end + + _ -> + 0 + end + end + + defp extract_count(_), do: 0 + + defp build_events_summary(rows) when is_list(rows) do + counts = + rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{critical: 0, high: 0, medium: 0, low: 0}, fn row, acc -> + severity = row |> Map.get("severity") |> normalize_severity() + + case severity do + "Critical" -> Map.update!(acc, :critical, &(&1 + 1)) + "High" -> Map.update!(acc, :high, &(&1 + 1)) + "Medium" -> Map.update!(acc, :medium, &(&1 + 1)) + "Low" -> Map.update!(acc, :low, &(&1 + 1)) + _ -> acc + end + end) + + recent = + rows + |> Enum.filter(&is_map/1) + |> Enum.filter(fn row -> + severity = row |> Map.get("severity") |> normalize_severity() + severity in ["Critical", "High"] + end) + |> Enum.take(5) + + Map.merge(counts, %{total: length(rows), recent: recent}) + end + + defp build_events_summary(_), + do: 
%{critical: 0, high: 0, medium: 0, low: 0, total: 0, recent: []} + + defp build_logs_summary(rows, %{} = counts) when is_list(rows) do + recent = + rows + |> Enum.filter(&is_map/1) + |> Enum.filter(fn row -> + severity = row |> Map.get("severity_text") |> normalize_log_level() + severity in ["Fatal", "Error"] + end) + |> Enum.take(5) + + counts + |> Map.take([:total, :fatal, :error, :warning, :info, :debug]) + |> Map.put(:recent, recent) + end + + defp build_logs_summary(_rows, _counts), + do: %{fatal: 0, error: 0, warning: 0, info: 0, debug: 0, total: 0, recent: []} + + defp build_observability_summary( + metrics_total, + metrics_error, + metrics_slow, + avg_duration_ms, + trace_stats, + slow_spans + ) + when is_integer(metrics_total) and is_integer(metrics_error) and is_integer(metrics_slow) and + is_number(avg_duration_ms) and is_map(trace_stats) and is_list(slow_spans) do + trace_stats = + case Map.get(trace_stats, "payload") do + %{} = payload -> payload + _ -> trace_stats + end + + traces_count = + extract_numeric(Map.get(trace_stats, "total") || Map.get(trace_stats, "count")) |> to_int() + + error_rate = + if metrics_total > 0 do + Float.round(metrics_error / metrics_total * 100.0, 1) + else + 0.0 + end + + slow_spans = + slow_spans + |> Enum.filter(&is_map/1) + |> Enum.take(5) + + %{ + metrics_count: metrics_total, + traces_count: traces_count, + avg_duration: avg_duration_ms, + error_rate: error_rate, + slow_spans_count: metrics_slow, + slow_spans: slow_spans + } + end + + defp build_observability_summary(_, _, _, _, _, _), + do: %{ + metrics_count: 0, + traces_count: 0, + avg_duration: 0, + error_rate: 0.0, + slow_spans_count: 0, + slow_spans: [] + } + + defp to_int(nil), do: 0 + defp to_int(value) when is_integer(value), do: value + defp to_int(value) when is_float(value), do: trunc(value) + defp to_int(%Decimal{} = d), do: Decimal.to_integer(d) + defp to_int(_), do: 0 + + defp build_high_utilization_summary(cpu_rows, memory_rows, disk_rows) + when is_list(cpu_rows) and is_list(memory_rows) and is_list(disk_rows) do + # Deduplicate CPU by host, keeping most recent + unique_cpu_hosts = + cpu_rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + host = Map.get(row, "host") || Map.get(row, "device_id") || "" + if host != "", do: Map.put_new(acc, host, row), else: acc + end) + |> Map.values() + + # Deduplicate Memory by host, keeping most recent + unique_memory_hosts = + memory_rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + host = Map.get(row, "host") || Map.get(row, "device_id") || "" + if host != "", do: Map.put_new(acc, host, row), else: acc + end) + |> Map.values() + + # Deduplicate Disk by host+mount, keeping most recent + unique_disks = + disk_rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + host = Map.get(row, "host") || Map.get(row, "device_id") || "" + mount = Map.get(row, "mount_point") || Map.get(row, "mount") || "" + key = "#{host}:#{mount}" + if host != "", do: Map.put_new(acc, key, row), else: acc + end) + |> Map.values() + + # Categorize CPU by utilization level + cpu_categorized = + unique_cpu_hosts + |> Enum.reduce(%{warning: [], critical: []}, fn row, acc -> + cpu_usage = + extract_numeric( + Map.get(row, "value") || Map.get(row, "cpu_usage") || Map.get(row, "usage_percent") || + Map.get(row, "user") || 0 + ) + + cond do + cpu_usage >= 90 -> Map.update!(acc, :critical, &[row | &1]) + cpu_usage >= 80 -> Map.update!(acc, :warning, &[row | &1]) + true -> acc + end + end) + + # Categorize Memory by 
utilization level + memory_categorized = + unique_memory_hosts + |> Enum.reduce(%{warning: [], critical: []}, fn row, acc -> + mem_usage = + extract_numeric( + Map.get(row, "percent") || Map.get(row, "value") || Map.get(row, "used_percent") || 0 + ) + + cond do + mem_usage >= 90 -> Map.update!(acc, :critical, &[row | &1]) + mem_usage >= 85 -> Map.update!(acc, :warning, &[row | &1]) + true -> acc + end + end) + + # Categorize Disk by utilization level + disk_categorized = + unique_disks + |> Enum.reduce(%{warning: [], critical: []}, fn row, acc -> + disk_usage = extract_numeric(Map.get(row, "percent") || Map.get(row, "value") || 0) + + cond do + disk_usage >= 90 -> Map.update!(acc, :critical, &[row | &1]) + disk_usage >= 85 -> Map.update!(acc, :warning, &[row | &1]) + true -> acc + end + end) + + # Get top high CPU utilization hosts + high_cpu_services = + unique_cpu_hosts + |> Enum.filter(fn row -> + cpu = + extract_numeric( + Map.get(row, "value") || Map.get(row, "cpu_usage") || Map.get(row, "usage_percent") || + Map.get(row, "user") || 0 + ) + + cpu >= 70 + end) + |> Enum.sort_by(fn row -> + cpu = + extract_numeric( + Map.get(row, "value") || Map.get(row, "cpu_usage") || Map.get(row, "usage_percent") || + Map.get(row, "user") || 0 + ) + + -cpu + end) + |> Enum.take(3) + + # Get top high memory utilization hosts + high_memory_services = + unique_memory_hosts + |> Enum.filter(fn row -> + mem = + extract_numeric( + Map.get(row, "percent") || Map.get(row, "value") || Map.get(row, "used_percent") || 0 + ) + + mem >= 70 + end) + |> Enum.sort_by(fn row -> + mem = + extract_numeric( + Map.get(row, "percent") || Map.get(row, "value") || Map.get(row, "used_percent") || 0 + ) + + -mem + end) + |> Enum.take(3) + + # Get top high disk utilization + high_disk_services = + unique_disks + |> Enum.filter(fn row -> + disk = extract_numeric(Map.get(row, "percent") || Map.get(row, "value") || 0) + disk >= 70 + end) + |> Enum.sort_by(fn row -> + disk = extract_numeric(Map.get(row, "percent") || Map.get(row, "value") || 0) + -disk + end) + |> Enum.take(3) + + %{ + cpu_warning: length(cpu_categorized.warning), + cpu_critical: length(cpu_categorized.critical), + memory_warning: length(memory_categorized.warning), + memory_critical: length(memory_categorized.critical), + disk_warning: length(disk_categorized.warning), + disk_critical: length(disk_categorized.critical), + cpu_services: high_cpu_services, + memory_services: high_memory_services, + disk_services: high_disk_services, + total_cpu_hosts: length(unique_cpu_hosts), + total_memory_hosts: length(unique_memory_hosts), + total_disk_mounts: length(unique_disks) + } + end + + defp build_high_utilization_summary(_, _, _), + do: %{ + cpu_warning: 0, + cpu_critical: 0, + memory_warning: 0, + memory_critical: 0, + disk_warning: 0, + disk_critical: 0, + cpu_services: [], + memory_services: [], + disk_services: [], + total_cpu_hosts: 0, + total_memory_hosts: 0, + total_disk_mounts: 0 + } + + defp extract_numeric(value) when is_number(value), do: value + + defp extract_numeric(value) when is_binary(value) do + case Float.parse(value) do + {num, _} -> num + :error -> 0 + end + end + + defp extract_numeric(_), do: 0 + + defp normalize_severity(nil), do: "" + + defp normalize_severity(value) do + case value |> to_string() |> String.trim() |> String.downcase() do + "critical" -> "Critical" + "high" -> "High" + "medium" -> "Medium" + "low" -> "Low" + _ -> "" + end + end + + defp normalize_log_level(nil), do: "" + + defp normalize_log_level(value) do + case value |> 
to_string() |> String.trim() |> String.downcase() do + "fatal" -> "Fatal" + "error" -> "Error" + "warn" -> "Warning" + "warning" -> "Warning" + "info" -> "Info" + "debug" -> "Debug" + "trace" -> "Debug" + _ -> "" + end + end + + defp format_error(%Jason.DecodeError{} = err), do: Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end + + @impl true + def render(assigns) do + ~H""" + <Layouts.app flash={@flash} current_scope={@current_scope} srql={@srql}> + <div class="mx-auto max-w-7xl"> + <div :if={is_binary(@error)} class="mb-4"> + <div role="alert" class="alert alert-error"> + <.icon name="hero-exclamation-triangle" class="size-5" /> + <span class="text-sm">{@error}</span> + </div> + </div> + + <div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-3 mb-4"> + <.stat_card + title="Total Devices" + value={Map.get(@stats, :total_devices, 0)} + icon="hero-server" + href={~p"/devices"} + /> + <.stat_card + title="Offline Devices" + value={Map.get(@stats, :offline_devices, 0)} + icon="hero-signal-slash" + tone={if Map.get(@stats, :offline_devices, 0) > 0, do: "error", else: "success"} + href={~p"/devices?#{%{q: "in:devices is_available:false sort:last_seen:desc limit:100"}}"} + /> + <.stat_card + title="Active Services" + value={Map.get(@stats, :total_services, 0)} + subtitle="unique" + icon="hero-wrench-screwdriver" + href={~p"/services"} + /> + <.stat_card + title="Failing Services" + value={Map.get(@stats, :failing_services, 0)} + subtitle="unique" + icon="hero-exclamation-triangle" + tone={if Map.get(@stats, :failing_services, 0) > 0, do: "error", else: "success"} + href={ + ~p"/services?#{%{q: "in:services available:false time:last_1h sort:timestamp:desc limit:100"}}" + } + /> + </div> + + <div class="grid grid-cols-1 lg:grid-cols-2 xl:grid-cols-3 gap-4"> + <.device_availability_widget availability={@device_availability} loading={@loading} /> + <.high_utilization_widget data={@high_utilization} loading={@loading} /> + <.bandwidth_widget data={@bandwidth} loading={@loading} /> + <.critical_logs_widget summary={@logs_summary} loading={@loading} /> + <.observability_widget data={@observability} loading={@loading} /> + <.critical_events_widget summary={@events_summary} loading={@loading} /> + </div> + + <div class="mt-3 text-xs text-base-content/40 flex items-center gap-2"> + <span :if={@loading} class="loading loading-spinner loading-xs" /> + <span :if={is_struct(@refreshed_at, DateTime)} class="font-mono"> + Updated {Calendar.strftime(@refreshed_at, "%H:%M:%S")} + </span> + <span class="text-base-content/30">·</span> + <span>Auto-refresh 30s</span> + </div> + </div> + </Layouts.app> + """ + end + + attr :title, :string, required: true + attr :value, :any, required: true + attr :subtitle, :string, default: nil + attr :href, :string, required: true + attr :icon, :string, default: nil + attr :tone, :string, default: "neutral" + + def stat_card(assigns) do + ~H""" + <.link href={@href} class="block group"> + <div class={[ + "rounded-xl border bg-base-100 p-4 flex items-center gap-4", + "hover:shadow-md transition-shadow cursor-pointer", + tone_border(@tone) + ]}> + <div class={["p-3 rounded-lg", tone_bg(@tone)]}> + <.icon :if={@icon} name={@icon} class={["size-6", tone_icon(@tone)]} /> + </div> + <div class="flex-1 min-w-0"> + 
<div class={["text-2xl font-bold", tone_value(@tone)]}> + {format_number(@value)} + </div> + <div class="text-sm text-base-content/60"> + {@title} + <span :if={@subtitle} class="text-base-content/40"> + {" | "} + {@subtitle} + </span> + </div> + </div> + </div> + </.link> + """ + end + + defp tone_border("error"), do: "border-error/30" + defp tone_border("warning"), do: "border-warning/30" + defp tone_border("success"), do: "border-success/30" + defp tone_border(_), do: "border-base-200" + + defp tone_bg("error"), do: "bg-error/10" + defp tone_bg("warning"), do: "bg-warning/10" + defp tone_bg("success"), do: "bg-success/10" + defp tone_bg(_), do: "bg-primary/10" + + defp tone_icon("error"), do: "text-error" + defp tone_icon("warning"), do: "text-warning" + defp tone_icon("success"), do: "text-success" + defp tone_icon(_), do: "text-primary" + + defp tone_value("error"), do: "text-error" + defp tone_value("warning"), do: "text-warning" + defp tone_value("success"), do: "text-success" + defp tone_value(_), do: "text-base-content" + + defp format_number(n) when is_integer(n) and n >= 1000 do + n |> Integer.to_string() |> add_commas() + end + + defp format_number(n) when is_integer(n), do: Integer.to_string(n) + defp format_number(n) when is_float(n), do: n |> trunc() |> format_number() + defp format_number(_), do: "0" + + defp format_compact_number(n) when is_float(n), do: n |> trunc() |> format_compact_number() + + defp format_compact_number(n) when is_integer(n) do + sign = if n < 0, do: "-", else: "" + abs_n = abs(n) + + formatted = + cond do + abs_n >= 1_000_000_000 -> + compact_decimal(abs_n / 1_000_000_000.0, 1) <> "B" + + abs_n >= 1_000_000 -> + compact_decimal(abs_n / 1_000_000.0, 1) <> "M" + + abs_n >= 100_000 -> + Integer.to_string(div(abs_n, 1000)) <> "k" + + abs_n >= 1_000 -> + compact_decimal(abs_n / 1000.0, 1) <> "k" + + true -> + Integer.to_string(abs_n) + end + + sign <> formatted + end + + defp format_compact_number(_), do: "0" + + defp compact_decimal(value, decimals) when is_number(value) and is_integer(decimals) do + value + |> :erlang.float_to_binary(decimals: decimals) + |> String.trim_trailing("0") + |> String.trim_trailing(".") + end + + defp add_commas(str) do + str + |> String.reverse() + |> String.graphemes() + |> Enum.chunk_every(3) + |> Enum.join(",") + |> String.reverse() + end + + attr :availability, :map, required: true + attr :loading, :boolean, default: false + + def device_availability_widget(assigns) do + total = Map.get(assigns.availability, :total, 0) + online = Map.get(assigns.availability, :online, 0) + offline = Map.get(assigns.availability, :offline, 0) + pct = Map.get(assigns.availability, :availability_pct, 100.0) + + # Ensure pct is a float for display + pct_display = if is_number(pct), do: Float.round(pct * 1.0, 1), else: 100.0 + + online_pct = if total > 0, do: Float.round(online / total * 100.0, 0), else: 100.0 + offline_pct = if total > 0, do: Float.round(offline / total * 100.0, 0), else: 0.0 + + assigns = + assigns + |> assign(:online, online) + |> assign(:offline, offline) + |> assign(:total, total) + |> assign(:pct, pct_display) + |> assign(:online_pct, online_pct) + |> assign(:offline_pct, offline_pct) + + ~H""" + <.ui_panel class="h-80"> + <:header> + <.link href={~p"/devices"} class="hover:text-primary transition-colors"> + <div class="text-sm font-semibold">Device Availability</div> + </.link> + <.link + href={~p"/devices?#{%{q: "in:devices is_available:false sort:last_seen:desc limit:100"}}"} + class="text-base-content/60 
hover:text-primary" + title="View offline devices" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + </.link> + </:header> + + <div :if={@loading} class="flex-1 flex items-center justify-center"> + <span class="loading loading-spinner loading-md" /> + </div> + + <div :if={not @loading} class="flex items-center gap-6 h-full"> + <div class="flex-1"> + <div class="relative w-32 h-32 mx-auto"> + <svg viewBox="0 0 36 36" class="w-full h-full -rotate-90"> + <circle + cx="18" + cy="18" + r="15.5" + fill="none" + stroke="currentColor" + stroke-width="3" + class="text-error/30" + /> + <circle + cx="18" + cy="18" + r="15.5" + fill="none" + stroke="currentColor" + stroke-width="3" + stroke-dasharray={~s(#{@online_pct} #{100 - @online_pct})} + class="text-success" + /> + </svg> + <div class="absolute inset-0 flex flex-col items-center justify-center"> + <span class="text-2xl font-bold">{@pct}%</span> + <span class="text-xs text-base-content/60">Availability</span> + </div> + </div> + </div> + + <div class="flex-1 space-y-3"> + <.link + href={~p"/devices?#{%{q: "in:devices is_available:true sort:last_seen:desc limit:20"}}"} + class="flex items-center justify-between hover:bg-base-200/50 rounded-lg p-2 -m-2 transition-colors" + > + <div class="flex items-center gap-2"> + <span class="w-3 h-3 rounded-full bg-success" /> + <span class="text-sm">Online</span> + </div> + <span class="font-semibold">{format_number(@online)}</span> + </.link> + <.link + href={~p"/devices?#{%{q: "in:devices is_available:false sort:last_seen:desc limit:20"}}"} + class="flex items-center justify-between hover:bg-base-200/50 rounded-lg p-2 -m-2 transition-colors" + > + <div class="flex items-center gap-2"> + <span class="w-3 h-3 rounded-full bg-error" /> + <span class="text-sm">Offline</span> + </div> + <span class="font-semibold">{format_number(@offline)}</span> + </.link> + + <.link + :if={@offline > 0} + href={~p"/devices?#{%{q: "in:devices is_available:false sort:last_seen:desc limit:20"}}"} + class="block mt-4 p-2 rounded-lg bg-error/10 hover:bg-error/20 transition-colors" + > + <div class="flex items-center gap-2 text-error text-sm"> + <.icon name="hero-signal-slash" class="size-4" /> + <span>{@offline} device{if @offline != 1, do: "s", else: ""} offline</span> + </div> + </.link> + </div> + </div> + </.ui_panel> + """ + end + + attr :summary, :map, required: true + attr :loading, :boolean, default: false + + def critical_events_widget(assigns) do + ~H""" + <div class="h-80 rounded-xl border border-base-200 bg-base-100 shadow-sm flex flex-col overflow-hidden"> + <header class="px-4 py-3 bg-base-200/40 flex items-start justify-between gap-3 shrink-0"> + <.link href={~p"/events"} class="hover:text-primary transition-colors"> + <div class="text-sm font-semibold">Critical Events</div> + </.link> + <.link + href={ + ~p"/events?#{%{q: "in:events severity:(Critical,High) time:last_24h sort:event_timestamp:desc limit:100"}}" + } + class="text-base-content/60 hover:text-primary" + title="View critical events" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + </.link> + </header> + + <div :if={@loading} class="flex-1 flex items-center justify-center"> + <span class="loading loading-spinner loading-md" /> + </div> + + <div :if={not @loading} class="flex-1 flex flex-col min-h-0 px-4 py-4"> + <table class="table table-xs mb-3 shrink-0"> + <thead> + <tr class="border-b border-base-200"> + <th class="text-xs font-medium text-base-content/60">Severity</th> + <th class="text-center text-xs 
font-medium text-base-content/60">Count</th> + <th class="text-center text-xs font-medium text-base-content/60">%</th> + </tr> + </thead> + <tbody> + <.severity_row + label="Critical" + count={Map.get(@summary, :critical, 0)} + total={Map.get(@summary, :total, 0)} + color="error" + href={ + ~p"/events?#{%{q: "in:events severity:Critical time:last_24h sort:event_timestamp:desc limit:100"}}" + } + /> + <.severity_row + label="High" + count={Map.get(@summary, :high, 0)} + total={Map.get(@summary, :total, 0)} + color="warning" + href={ + ~p"/events?#{%{q: "in:events severity:High time:last_24h sort:event_timestamp:desc limit:100"}}" + } + /> + <.severity_row + label="Medium" + count={Map.get(@summary, :medium, 0)} + total={Map.get(@summary, :total, 0)} + color="info" + href={ + ~p"/events?#{%{q: "in:events severity:Medium time:last_24h sort:event_timestamp:desc limit:100"}}" + } + /> + <.severity_row + label="Low" + count={Map.get(@summary, :low, 0)} + total={Map.get(@summary, :total, 0)} + color="primary" + href={ + ~p"/events?#{%{q: "in:events severity:Low time:last_24h sort:event_timestamp:desc limit:100"}}" + } + /> + </tbody> + </table> + + <div + :if={Map.get(@summary, :recent, []) == []} + class="flex-1 flex items-center justify-center text-center" + > + <div> + <.icon name="hero-shield-check" class="size-8 mx-auto mb-2 text-success" /> + <p class="text-sm text-base-content/60">No critical events</p> + <p class="text-xs text-base-content/40 mt-1">All systems reporting normally</p> + </div> + </div> + + <div + :if={Map.get(@summary, :recent, []) != []} + class="flex-1 overflow-y-auto space-y-2 min-h-0" + > + <%= for event <- Map.get(@summary, :recent, []) do %> + <.event_entry event={event} /> + <% end %> + </div> + </div> + </div> + """ + end + + attr :summary, :map, required: true + attr :loading, :boolean, default: false + + def critical_logs_widget(assigns) do + ~H""" + <div class="h-80 rounded-xl border border-base-200 bg-base-100 shadow-sm flex flex-col overflow-hidden"> + <header class="px-4 py-3 bg-base-200/40 flex items-start justify-between gap-3 shrink-0"> + <.link + href={~p"/observability?#{%{tab: "logs"}}"} + class="hover:text-primary transition-colors" + > + <div class="text-sm font-semibold">Critical Logs</div> + </.link> + <.link + href={ + ~p"/observability?#{%{tab: "logs", q: "in:logs severity_text:(fatal,error,FATAL,ERROR) time:last_24h sort:timestamp:desc limit:100"}}" + } + class="text-base-content/60 hover:text-primary" + title="View critical logs" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + </.link> + </header> + + <div :if={@loading} class="flex-1 flex items-center justify-center"> + <span class="loading loading-spinner loading-md" /> + </div> + + <div :if={not @loading} class="flex-1 flex flex-col min-h-0 px-4 py-4"> + <table class="table table-xs mb-3 shrink-0"> + <thead> + <tr class="border-b border-base-200"> + <th class="text-xs font-medium text-base-content/60">Level</th> + <th class="text-center text-xs font-medium text-base-content/60">Count</th> + <th class="text-center text-xs font-medium text-base-content/60">%</th> + </tr> + </thead> + <tbody> + <.severity_row + label="Fatal" + count={Map.get(@summary, :fatal, 0)} + total={Map.get(@summary, :total, 0)} + color="error" + href={ + ~p"/observability?#{%{tab: "logs", q: "in:logs severity_text:(fatal,FATAL) time:last_24h sort:timestamp:desc limit:100"}}" + } + /> + <.severity_row + label="Error" + count={Map.get(@summary, :error, 0)} + total={Map.get(@summary, :total, 0)} + 
color="warning" + href={ + ~p"/observability?#{%{tab: "logs", q: "in:logs severity_text:(error,ERROR) time:last_24h sort:timestamp:desc limit:100"}}" + } + /> + <.severity_row + label="Warning" + count={Map.get(@summary, :warning, 0)} + total={Map.get(@summary, :total, 0)} + color="info" + href={ + ~p"/observability?#{%{tab: "logs", q: "in:logs severity_text:(warning,warn,WARNING,WARN) time:last_24h sort:timestamp:desc limit:100"}}" + } + /> + <.severity_row + label="Info" + count={Map.get(@summary, :info, 0)} + total={Map.get(@summary, :total, 0)} + color="primary" + href={ + ~p"/observability?#{%{tab: "logs", q: "in:logs severity_text:(info,INFO) time:last_24h sort:timestamp:desc limit:100"}}" + } + /> + <.severity_row + label="Debug" + count={Map.get(@summary, :debug, 0)} + total={Map.get(@summary, :total, 0)} + color="neutral" + href={ + ~p"/observability?#{%{tab: "logs", q: "in:logs severity_text:(debug,trace,DEBUG,TRACE) time:last_24h sort:timestamp:desc limit:100"}}" + } + /> + </tbody> + </table> + + <div + :if={Map.get(@summary, :recent, []) == []} + class="flex-1 flex items-center justify-center text-center" + > + <div> + <.icon name="hero-document-check" class="size-8 mx-auto mb-2 text-success" /> + <p class="text-sm text-base-content/60">No fatal or error logs</p> + <p class="text-xs text-base-content/40 mt-1">All systems logging normally</p> + </div> + </div> + + <div + :if={Map.get(@summary, :recent, []) != []} + class="flex-1 overflow-y-auto space-y-2 min-h-0" + > + <%= for log <- Map.get(@summary, :recent, []) do %> + <.log_entry log={log} /> + <% end %> + </div> + </div> + </div> + """ + end + + attr :data, :map, required: true + attr :loading, :boolean, default: false + + def observability_widget(assigns) do + data = assigns.data || %{} + metrics_count = Map.get(data, :metrics_count, 0) + traces_count = Map.get(data, :traces_count, 0) + avg_duration = Map.get(data, :avg_duration, 0) + error_rate = Map.get(data, :error_rate, 0.0) + slow_spans_count = Map.get(data, :slow_spans_count, 0) + slow_spans = Map.get(data, :slow_spans, []) + + assigns = + assigns + |> assign(:metrics_count, metrics_count) + |> assign(:traces_count, traces_count) + |> assign(:avg_duration, avg_duration) + |> assign(:error_rate, error_rate) + |> assign(:slow_spans_count, slow_spans_count) + |> assign(:slow_spans, slow_spans) + + ~H""" + <.ui_panel class="h-80"> + <:header> + <.link href={~p"/observability"} class="hover:text-primary transition-colors"> + <div class="text-sm font-semibold">Observability</div> + </.link> + <.link + href={ + ~p"/observability?#{%{tab: "traces", q: "in:otel_trace_summaries time:last_24h sort:timestamp:desc limit:100"}}" + } + class="text-base-content/60 hover:text-primary" + title="View traces" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + </.link> + </:header> + + <div :if={@loading} class="flex-1 flex items-center justify-center"> + <span class="loading loading-spinner loading-md" /> + </div> + + <div :if={not @loading} class="flex flex-col h-full"> + <div class="grid grid-cols-2 gap-3 mb-4"> + <div class="rounded-lg bg-base-200/50 p-3 text-center"> + <div class="text-xl font-bold text-primary">{format_compact_number(@metrics_count)}</div> + <div class="text-xs text-base-content/60">Metrics</div> + </div> + <div class="rounded-lg bg-base-200/50 p-3 text-center"> + <div class="text-xl font-bold text-secondary">{format_compact_number(@traces_count)}</div> + <div class="text-xs text-base-content/60">Traces</div> + </div> + <div class="rounded-lg 
bg-base-200/50 p-3 text-center"> + <div class="text-xl font-bold text-info">{format_duration(@avg_duration)}</div> + <div class="text-xs text-base-content/60">Avg Duration</div> + </div> + <div class={[ + "rounded-lg p-3 text-center", + (@error_rate > 5 && "bg-error/10") || "bg-base-200/50" + ]}> + <div class={["text-xl font-bold", (@error_rate > 5 && "text-error") || "text-success"]}> + {@error_rate}% + </div> + <div class="text-xs text-base-content/60">Error Rate</div> + </div> + </div> + + <div class="flex-1 min-h-0"> + <div class="flex items-center justify-between mb-2"> + <span class="text-xs font-medium text-base-content/70">Slow Spans</span> + <span class={[ + "text-xs font-bold", + (@slow_spans_count > 0 && "text-warning") || "text-base-content/50" + ]}> + {format_compact_number(@slow_spans_count)} + </span> + </div> + + <div :if={@slow_spans == []} class="flex items-center justify-center py-4"> + <div class="text-center"> + <.icon name="hero-bolt" class="size-6 mx-auto mb-1 text-success" /> + <p class="text-xs text-base-content/60">No slow spans</p> + </div> + </div> + + <div :if={@slow_spans != []} class="space-y-1 overflow-y-auto max-h-24"> + <%= for span <- @slow_spans do %> + <div class="flex items-center justify-between text-xs p-1.5 rounded bg-warning/10"> + <span class="truncate max-w-[60%]" title={span_name(span)}>{span_name(span)}</span> + <span class="font-mono text-warning">{format_duration(span_duration(span))}</span> + </div> + <% end %> + </div> + </div> + </div> + </.ui_panel> + """ + end + + attr :data, :map, required: true + attr :loading, :boolean, default: false + + def high_utilization_widget(assigns) do + data = assigns.data || %{} + cpu_warning = Map.get(data, :cpu_warning, 0) + cpu_critical = Map.get(data, :cpu_critical, 0) + memory_warning = Map.get(data, :memory_warning, 0) + memory_critical = Map.get(data, :memory_critical, 0) + disk_warning = Map.get(data, :disk_warning, 0) + disk_critical = Map.get(data, :disk_critical, 0) + cpu_services = Map.get(data, :cpu_services, []) + memory_services = Map.get(data, :memory_services, []) + disk_services = Map.get(data, :disk_services, []) + total_cpu_hosts = Map.get(data, :total_cpu_hosts, 0) + total_memory_hosts = Map.get(data, :total_memory_hosts, 0) + total_disk_mounts = Map.get(data, :total_disk_mounts, 0) + + assigns = + assigns + |> assign(:cpu_warning, cpu_warning) + |> assign(:cpu_critical, cpu_critical) + |> assign(:memory_warning, memory_warning) + |> assign(:memory_critical, memory_critical) + |> assign(:disk_warning, disk_warning) + |> assign(:disk_critical, disk_critical) + |> assign(:cpu_services, cpu_services) + |> assign(:memory_services, memory_services) + |> assign(:disk_services, disk_services) + |> assign(:total_cpu_hosts, total_cpu_hosts) + |> assign(:total_memory_hosts, total_memory_hosts) + |> assign(:total_disk_mounts, total_disk_mounts) + + ~H""" + <.ui_panel class="h-80"> + <:header> + <.link + href={~p"/dashboard?#{%{q: "in:cpu_metrics time:last_1h sort:timestamp:desc"}}"} + class="hover:text-primary transition-colors" + > + <div class="text-sm font-semibold">High Utilization</div> + </.link> + <.link + href={~p"/dashboard?#{%{q: "in:cpu_metrics time:last_1h sort:timestamp:desc limit:100"}}"} + class="text-base-content/60 hover:text-primary" + title="View metrics" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + </.link> + </:header> + + <div :if={@loading} class="flex-1 flex items-center justify-center"> + <span class="loading loading-spinner loading-md" /> + 
</div> + + <div :if={not @loading} class="flex flex-col h-full"> + <div class="grid grid-cols-3 gap-2 mb-3"> + <div class="rounded-lg bg-base-200/50 p-2"> + <div class="flex items-center gap-1 text-[10px] text-base-content/60 mb-1"> + <.icon name="hero-cpu-chip" class="size-3" /> CPU + </div> + <div class="flex flex-wrap items-center gap-1"> + <span :if={@cpu_critical > 0} class="badge badge-error badge-xs">{@cpu_critical}</span> + <span :if={@cpu_warning > 0} class="badge badge-warning badge-xs">{@cpu_warning}</span> + <span + :if={@cpu_critical == 0 and @cpu_warning == 0} + class="badge badge-success badge-xs" + > + OK + </span> + </div> + </div> + <div class="rounded-lg bg-base-200/50 p-2"> + <div class="flex items-center gap-1 text-[10px] text-base-content/60 mb-1"> + <.icon name="hero-circle-stack" class="size-3" /> Memory + </div> + <div class="flex flex-wrap items-center gap-1"> + <span :if={@memory_critical > 0} class="badge badge-error badge-xs"> + {@memory_critical} + </span> + <span :if={@memory_warning > 0} class="badge badge-warning badge-xs"> + {@memory_warning} + </span> + <span + :if={@memory_critical == 0 and @memory_warning == 0} + class="badge badge-success badge-xs" + > + OK + </span> + </div> + </div> + <div class="rounded-lg bg-base-200/50 p-2"> + <div class="flex items-center gap-1 text-[10px] text-base-content/60 mb-1"> + <.icon name="hero-server-stack" class="size-3" /> Disk + </div> + <div class="flex flex-wrap items-center gap-1"> + <span :if={@disk_critical > 0} class="badge badge-error badge-xs"> + {@disk_critical} + </span> + <span :if={@disk_warning > 0} class="badge badge-warning badge-xs"> + {@disk_warning} + </span> + <span + :if={@disk_critical == 0 and @disk_warning == 0} + class="badge badge-success badge-xs" + > + OK + </span> + </div> + </div> + </div> + + <div class="text-[10px] text-base-content/50 mb-2"> + {@total_cpu_hosts} CPU · {@total_memory_hosts} MEM · {@total_disk_mounts} disks + </div> + + <div + :if={@cpu_services == [] and @memory_services == [] and @disk_services == []} + class="flex-1 flex items-center justify-center" + > + <div class="text-center"> + <.icon name="hero-cpu-chip" class="size-6 mx-auto mb-1 text-success" /> + <p class="text-xs text-base-content/60">No high utilization</p> + </div> + </div> + + <div + :if={@cpu_services != [] or @memory_services != [] or @disk_services != []} + class="flex-1 overflow-y-auto space-y-1 min-h-0" + > + <%= for svc <- @cpu_services do %> + <.utilization_row service={svc} type="cpu" /> + <% end %> + <%= for svc <- @memory_services do %> + <.memory_utilization_row service={svc} /> + <% end %> + <%= for svc <- @disk_services do %> + <.disk_utilization_row service={svc} /> + <% end %> + </div> + </div> + </.ui_panel> + """ + end + + attr :data, :map, required: true + attr :loading, :boolean, default: false + + def bandwidth_widget(assigns) do + data = assigns.data || %{} + targets = Map.get(data, :targets, []) + total_download = Map.get(data, :total_download, 0.0) + total_upload = Map.get(data, :total_upload, 0.0) + avg_latency = Map.get(data, :avg_latency, 0.0) + target_count = Map.get(data, :target_count, 0) + + assigns = + assigns + |> assign(:targets, targets) + |> assign(:total_download, total_download) + |> assign(:total_upload, total_upload) + |> assign(:avg_latency, avg_latency) + |> assign(:target_count, target_count) + + ~H""" + <.ui_panel class="h-80"> + <:header> + <.link + href={~p"/dashboard?#{%{q: "in:rperf_targets time:last_1h sort:timestamp:desc"}}"} + class="hover:text-primary 
transition-colors" + > + <div class="text-sm font-semibold">Bandwidth Tracker</div> + </.link> + <.link + href={~p"/dashboard?#{%{q: "in:rperf_targets time:last_1h sort:timestamp:desc limit:50"}}"} + class="text-base-content/60 hover:text-primary" + title="View bandwidth data" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + </.link> + </:header> + + <div :if={@loading} class="flex-1 flex items-center justify-center"> + <span class="loading loading-spinner loading-md" /> + </div> + + <div :if={not @loading} class="flex flex-col h-full"> + <div class="grid grid-cols-3 gap-2 mb-4"> + <div class="rounded-lg bg-success/10 p-2 text-center"> + <div class="text-lg font-bold text-success">{format_mbps(@total_download)}</div> + <div class="text-[10px] text-base-content/60">Download</div> + </div> + <div class="rounded-lg bg-primary/10 p-2 text-center"> + <div class="text-lg font-bold text-primary">{format_mbps(@total_upload)}</div> + <div class="text-[10px] text-base-content/60">Upload</div> + </div> + <div class="rounded-lg bg-base-200/50 p-2 text-center"> + <div class="text-lg font-bold">{@avg_latency}ms</div> + <div class="text-[10px] text-base-content/60">Avg Latency</div> + </div> + </div> + + <div class="text-xs text-base-content/50 mb-2">{@target_count} targets</div> + + <div :if={@targets == []} class="flex-1 flex items-center justify-center"> + <div class="text-center"> + <.icon name="hero-signal" class="size-6 mx-auto mb-1 text-base-content/40" /> + <p class="text-xs text-base-content/60">No bandwidth data</p> + </div> + </div> + + <div :if={@targets != []} class="flex-1 overflow-y-auto min-h-0"> + <table class="table table-xs w-full"> + <thead> + <tr class="text-[10px]"> + <th class="text-base-content/60">Target</th> + <th class="text-right text-base-content/60">DL</th> + <th class="text-right text-base-content/60">UL</th> + <th class="text-right text-base-content/60">Lat</th> + </tr> + </thead> + <tbody> + <%= for target <- @targets do %> + <tr class="hover:bg-base-200/50"> + <td class="truncate max-w-[100px] text-xs" title={target.name}>{target.name}</td> + <td class="text-right text-xs text-success">{format_mbps(target.download_mbps)}</td> + <td class="text-right text-xs text-primary">{format_mbps(target.upload_mbps)}</td> + <td class="text-right text-xs font-mono">{round(target.latency_ms)}ms</td> + </tr> + <% end %> + </tbody> + </table> + </div> + </div> + </.ui_panel> + """ + end + + attr :service, :map, required: true + attr :type, :string, default: "cpu" + + defp utilization_row(assigns) do + svc = assigns.service + + cpu = + extract_numeric( + Map.get(svc, "value") || Map.get(svc, "cpu_usage") || Map.get(svc, "usage_percent") || + Map.get(svc, "user") || 0 + ) + + mem = extract_numeric(Map.get(svc, "memory_usage") || Map.get(svc, "mem_percent") || 0) + host = Map.get(svc, "host") || Map.get(svc, "device_id") || "Unknown" + + assigns = + assigns + |> assign(:cpu, cpu) + |> assign(:mem, mem) + |> assign(:host, host) + + ~H""" + <div class="flex items-center gap-2 p-1.5 rounded bg-base-200/50 text-xs"> + <div class="truncate flex-1 font-medium" title={@host}>{@host}</div> + <div class="flex items-center gap-2 shrink-0"> + <span class={["badge badge-xs", cpu_badge_class(@cpu)]}>CPU {@cpu |> round()}%</span> + <span :if={@mem > 0} class={["badge badge-xs", cpu_badge_class(@mem)]}> + MEM {@mem |> round()}% + </span> + </div> + </div> + """ + end + + attr :service, :map, required: true + + defp memory_utilization_row(assigns) do + svc = assigns.service + + 
percent = + extract_numeric( + Map.get(svc, "percent") || Map.get(svc, "value") || Map.get(svc, "used_percent") || 0 + ) + + host = Map.get(svc, "host") || Map.get(svc, "device_id") || "Unknown" + + assigns = + assigns + |> assign(:percent, percent) + |> assign(:host, host) + + ~H""" + <div class="flex items-center gap-2 p-1.5 rounded bg-base-200/50 text-xs"> + <div class="truncate flex-1 font-medium" title={@host}>{@host}</div> + <div class="shrink-0"> + <span class={["badge badge-xs", memory_badge_class(@percent)]}> + MEM {@percent |> round()}% + </span> + </div> + </div> + """ + end + + attr :service, :map, required: true + + defp disk_utilization_row(assigns) do + svc = assigns.service + percent = extract_numeric(Map.get(svc, "percent") || Map.get(svc, "value") || 0) + host = Map.get(svc, "host") || Map.get(svc, "device_id") || "Unknown" + mount = Map.get(svc, "mount_point") || Map.get(svc, "mount") || "/" + + assigns = + assigns + |> assign(:percent, percent) + |> assign(:host, host) + |> assign(:mount, mount) + + ~H""" + <div class="flex items-center gap-2 p-1.5 rounded bg-base-200/50 text-xs"> + <div class="truncate flex-1 min-w-0"> + <span class="font-medium" title={@host}>{@host}</span> + <span class="text-base-content/50 ml-1" title={@mount}>{@mount}</span> + </div> + <div class="shrink-0"> + <span class={["badge badge-xs", disk_badge_class(@percent)]}> + DISK {@percent |> round()}% + </span> + </div> + </div> + """ + end + + defp cpu_badge_class(value) when value >= 90, do: "badge-error" + defp cpu_badge_class(value) when value >= 80, do: "badge-warning" + defp cpu_badge_class(value) when value >= 70, do: "badge-info" + defp cpu_badge_class(_), do: "badge-ghost" + + defp memory_badge_class(value) when value >= 90, do: "badge-error" + defp memory_badge_class(value) when value >= 85, do: "badge-warning" + defp memory_badge_class(value) when value >= 70, do: "badge-info" + defp memory_badge_class(_), do: "badge-ghost" + + defp disk_badge_class(value) when value >= 90, do: "badge-error" + defp disk_badge_class(value) when value >= 85, do: "badge-warning" + defp disk_badge_class(value) when value >= 70, do: "badge-info" + defp disk_badge_class(_), do: "badge-ghost" + + defp span_name(span) when is_map(span) do + name = + Map.get(span, "name") || + Map.get(span, "span_name") || + Map.get(span, "root_span_name") || + Map.get(span, "operation") + + service = Map.get(span, "service_name") || Map.get(span, "root_service_name") + + case {name, service} do + {nil, nil} -> "Unknown" + {nil, svc} -> svc + {n, nil} -> n + {n, svc} -> "#{svc}: #{n}" + end + end + + defp span_name(_), do: "Unknown" + + defp span_duration(span) when is_map(span) do + # Use pre-calculated duration_ms if available, otherwise calculate + case Map.get(span, "duration_ms") do + ms when is_number(ms) -> + ms + + _ -> + start_nano = extract_numeric(Map.get(span, "start_time_unix_nano")) + end_nano = extract_numeric(Map.get(span, "end_time_unix_nano")) + + if is_number(start_nano) and is_number(end_nano) and end_nano > start_nano do + (end_nano - start_nano) / 1_000_000 + else + 0 + end + end + end + + defp span_duration(_), do: 0 + + defp format_duration(ms) when is_number(ms) do + cond do + ms >= 60_000 -> "#{Float.round(ms / 60_000, 1)}m" + ms >= 1000 -> "#{Float.round(ms / 1000, 1)}s" + true -> "#{round(ms)}ms" + end + end + + defp format_duration(_), do: "0ms" + + defp format_mbps(value) when is_number(value) do + cond do + value >= 1000 -> "#{Float.round(value / 1000, 1)} Gbps" + value >= 1 -> "#{Float.round(value, 
1)} Mbps" + value > 0 -> "#{round(value * 1000)} Kbps" + true -> "0" + end + end + + defp format_mbps(_), do: "0" + + attr :label, :string, required: true + attr :count, :integer, required: true + attr :total, :integer, required: true + attr :color, :string, required: true + attr :href, :string, required: true + + def severity_row(assigns) do + pct = if assigns.total > 0, do: round(assigns.count / assigns.total * 100), else: 0 + assigns = assign(assigns, :pct, pct) + + ~H""" + <tr class="hover:bg-base-200/50 cursor-pointer" onclick={"window.location.href='#{@href}'"}> + <td class={severity_text_class(@color)}>{@label}</td> + <td class={["text-center font-bold", severity_text_class(@color)]}>{format_number(@count)}</td> + <td class={["text-center text-xs", severity_text_class(@color)]}>{@pct}%</td> + </tr> + """ + end + + defp severity_text_class("error"), do: "text-error" + defp severity_text_class("warning"), do: "text-warning" + defp severity_text_class("info"), do: "text-info" + defp severity_text_class("primary"), do: "text-primary" + defp severity_text_class(_), do: "text-base-content/60" + + attr :event, :map, required: true + + def event_entry(assigns) do + ~H""" + <div class="p-2 rounded-lg bg-base-200/50 hover:bg-base-200 transition-colors"> + <div class="flex items-start gap-2"> + <.icon + name={severity_icon(@event["severity"])} + class={["size-4 mt-0.5", severity_text_class(severity_color(@event["severity"]))]} + /> + <div class="flex-1 min-w-0"> + <div class="text-sm font-medium truncate">{@event["host"] || "Unknown"}</div> + <div class="text-xs text-base-content/60 truncate"> + {@event["short_message"] || "No details"} + </div> + <div class={["text-xs", severity_text_class(severity_color(@event["severity"]))]}> + {@event["severity"] || "Unknown"} · {format_relative_time(@event["event_timestamp"])} + </div> + </div> + </div> + </div> + """ + end + + attr :log, :map, required: true + + def log_entry(assigns) do + ~H""" + <div class="p-2 rounded-lg bg-base-200/50 hover:bg-base-200 transition-colors"> + <div class="flex items-start gap-2"> + <.icon + name={log_level_icon(@log["severity_text"])} + class={["size-4 mt-0.5", severity_text_class(log_level_color(@log["severity_text"]))]} + /> + <div class="flex-1 min-w-0"> + <div class="text-sm font-medium truncate">{@log["service_name"] || "Unknown Service"}</div> + <div class="text-xs text-base-content/60 truncate">{truncate_message(@log["body"])}</div> + <div class={["text-xs", severity_text_class(log_level_color(@log["severity_text"]))]}> + {normalize_log_level(@log["severity_text"])} · {format_relative_time(@log["timestamp"])} + </div> + </div> + </div> + </div> + """ + end + + defp severity_icon(severity) do + case normalize_severity(severity) do + "Critical" -> "hero-shield-exclamation" + "High" -> "hero-exclamation-triangle" + "Medium" -> "hero-exclamation-circle" + "Low" -> "hero-information-circle" + _ -> "hero-exclamation-circle" + end + end + + defp severity_color(severity) do + case normalize_severity(severity) do + "Critical" -> "error" + "High" -> "warning" + "Medium" -> "info" + "Low" -> "primary" + _ -> "neutral" + end + end + + defp log_level_icon(level) do + case normalize_log_level(level) do + "Fatal" -> "hero-x-circle" + "Error" -> "hero-exclamation-circle" + "Warning" -> "hero-exclamation-triangle" + "Info" -> "hero-information-circle" + "Debug" -> "hero-document-text" + _ -> "hero-document-text" + end + end + + defp log_level_color(level) do + case normalize_log_level(level) do + "Fatal" -> "error" + "Error" -> 
"warning" + "Warning" -> "info" + "Info" -> "primary" + _ -> "neutral" + end + end + + defp truncate_message(nil), do: "" + + defp truncate_message(msg) when is_binary(msg) do + if String.length(msg) > 80 do + String.slice(msg, 0, 80) <> "..." + else + msg + end + end + + defp truncate_message(_), do: "" + + defp format_relative_time(nil), do: "Unknown" + + defp format_relative_time(timestamp) when is_binary(timestamp) do + case DateTime.from_iso8601(timestamp) do + {:ok, dt, _offset} -> + now = DateTime.utc_now() + diff_seconds = DateTime.diff(now, dt, :second) + + cond do + diff_seconds < 60 -> "Just now" + diff_seconds < 3600 -> "#{div(diff_seconds, 60)}m ago" + diff_seconds < 86400 -> "#{div(diff_seconds, 3600)}h ago" + diff_seconds < 604_800 -> "#{div(diff_seconds, 86400)}d ago" + true -> Calendar.strftime(dt, "%b %d") + end + + _ -> + "Unknown" + end + end + + defp format_relative_time(_), do: "Unknown" +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/dashboard_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/dashboard_live/index.ex new file mode 100644 index 000000000..d03f16a9a --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/dashboard_live/index.ex @@ -0,0 +1,118 @@ +defmodule ServiceRadarWebNGWeb.DashboardLive.Index do + use ServiceRadarWebNGWeb, :live_view + + alias ServiceRadarWebNGWeb.Dashboard.Engine + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + @default_limit 100 + @max_limit 500 + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Dashboard") + |> assign(:results, []) + |> assign(:panels, []) + |> assign(:limit, @default_limit) + |> SRQLPage.init("cpu_metrics", default_limit: @default_limit, builder_available: true)} + end + + @impl true + def handle_params(params, uri, socket) do + socket = + SRQLPage.load_list(socket, params, uri, :results, + default_limit: @default_limit, + max_limit: @max_limit + ) + + srql_response = %{ + "results" => socket.assigns.results, + "viz" => get_in(socket.assigns, [:srql, :viz]) + } + + {:noreply, assign(socket, :panels, Engine.build_panels(srql_response))} + end + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def handle_event("srql_submit", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_submit", params, fallback_path: "/dashboard")} + end + + def handle_event("srql_builder_toggle", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: "cpu_metrics")} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_run", %{}, fallback_path: "/dashboard")} + end + + def handle_event("srql_builder_add_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_add_filter", params, entity: "cpu_metrics")} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, entity: "cpu_metrics")} + end + + @impl true + def render(assigns) do + ~H""" + <Layouts.app flash={@flash} current_scope={@current_scope} srql={@srql}> + <div class="mx-auto 
max-w-7xl p-6"> + <.header> + Dashboard + <:subtitle>Auto-generated panels based on your SRQL query.</:subtitle> + <:actions> + <.ui_dropdown> + <:trigger> + <.ui_icon_button aria-label="Dashboard actions" title="Dashboard actions"> + <.icon name="hero-ellipsis-horizontal" class="size-4" /> + </.ui_icon_button> + </:trigger> + <:item> + <button type="button" class="w-full text-left opacity-60 cursor-not-allowed"> + Save dashboard (soon) + </button> + </:item> + <:item> + <button type="button" class="w-full text-left opacity-60 cursor-not-allowed"> + Add panel (soon) + </button> + </:item> + </.ui_dropdown> + </:actions> + </.header> + + <div class="grid grid-cols-1 gap-6"> + <%= for panel <- @panels do %> + <.live_component + module={panel.plugin} + id={panel.id} + title={panel.title} + panel_assigns={panel.assigns} + /> + <% end %> + + <.srql_results_table id="dashboard-results" rows={@results} empty_message="No results." /> + </div> + </div> + </Layouts.app> + """ + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/device_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/device_live/index.ex new file mode 100644 index 000000000..6a8f032a5 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/device_live/index.ex @@ -0,0 +1,651 @@ +defmodule ServiceRadarWebNGWeb.DeviceLive.Index do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + @default_limit 20 + @max_limit 100 + @sparkline_device_cap 200 + @sparkline_points_per_device 20 + @sparkline_bucket "5m" + @sparkline_window "last_1h" + @sparkline_threshold_ms 100.0 + @presence_window "last_24h" + @presence_bucket "24h" + @presence_device_cap 200 + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Devices") + |> assign(:devices, []) + |> assign(:icmp_sparklines, %{}) + |> assign(:icmp_error, nil) + |> assign(:snmp_presence, %{}) + |> assign(:sysmon_presence, %{}) + |> assign(:limit, @default_limit) + |> SRQLPage.init("devices", default_limit: @default_limit)} + end + + @impl true + def handle_params(params, uri, socket) do + socket = + socket + |> SRQLPage.load_list(params, uri, :devices, + default_limit: @default_limit, + max_limit: @max_limit + ) + + {icmp_sparklines, icmp_error} = load_icmp_sparklines(srql_module(), socket.assigns.devices) + {snmp_presence, sysmon_presence} = load_metric_presence(srql_module(), socket.assigns.devices) + + {:noreply, + assign(socket, + icmp_sparklines: icmp_sparklines, + icmp_error: icmp_error, + snmp_presence: snmp_presence, + sysmon_presence: sysmon_presence + )} + end + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def handle_event("srql_submit", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_submit", params, fallback_path: "/devices")} + end + + def handle_event("srql_builder_toggle", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: "devices")} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_run", %{}, fallback_path: 
"/devices")} + end + + def handle_event("srql_builder_add_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_add_filter", params, entity: "devices")} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, entity: "devices")} + end + + @impl true + def render(assigns) do + pagination = get_in(assigns, [:srql, :pagination]) || %{} + assigns = assign(assigns, :pagination, pagination) + + ~H""" + <Layouts.app flash={@flash} current_scope={@current_scope} srql={@srql}> + <div class="mx-auto max-w-7xl p-6"> + <.ui_panel> + <:header> + <div :if={is_binary(@icmp_error)} class="badge badge-warning badge-sm"> + ICMP: {@icmp_error} + </div> + </:header> + + <div class="overflow-x-auto"> + <table class="table table-sm table-zebra w-full"> + <thead> + <tr> + <th class="text-xs font-semibold text-base-content/70 bg-base-200/60">Device</th> + <th class="text-xs font-semibold text-base-content/70 bg-base-200/60">Hostname</th> + <th class="text-xs font-semibold text-base-content/70 bg-base-200/60">IP</th> + <th + class="text-xs font-semibold text-base-content/70 bg-base-200/60" + title="GRPC Health Check Status" + > + Status + </th> + <th + class="text-xs font-semibold text-base-content/70 bg-base-200/60" + title="ICMP Network Tests" + > + Network + </th> + <th + class="text-xs font-semibold text-base-content/70 bg-base-200/60" + title="Telemetry availability for this device" + > + Metrics + </th> + <th class="text-xs font-semibold text-base-content/70 bg-base-200/60">Poller</th> + <th class="text-xs font-semibold text-base-content/70 bg-base-200/60">Last Seen</th> + </tr> + </thead> + <tbody> + <tr :if={@devices == []}> + <td colspan="8" class="py-8 text-center text-sm text-base-content/60"> + No devices found. 
+ </td> + </tr> + + <%= for row <- Enum.filter(@devices, &is_map/1) do %> + <% device_id = Map.get(row, "device_id") || Map.get(row, "id") %> + <% icmp = + if is_binary(device_id), do: Map.get(@icmp_sparklines, device_id), else: nil %> + <% has_snmp = + is_binary(device_id) and Map.get(@snmp_presence, device_id, false) == true %> + <% has_sysmon = + is_binary(device_id) and Map.get(@sysmon_presence, device_id, false) == true %> + <tr class="hover:bg-base-200/40"> + <td class="font-mono text-xs"> + <.link + :if={is_binary(device_id)} + navigate={~p"/devices/#{device_id}"} + class="link link-hover" + > + {device_id} + </.link> + <span :if={not is_binary(device_id)} class="text-base-content/70">—</span> + </td> + <td class="text-sm max-w-[18rem] truncate">{Map.get(row, "hostname") || "—"}</td> + <td class="font-mono text-xs">{Map.get(row, "ip") || "—"}</td> + <td class="text-xs"> + <.availability_badge available={Map.get(row, "is_available")} /> + </td> + <td class="text-xs"> + <.icmp_sparkline :if={is_map(icmp)} spark={icmp} /> + <span :if={not is_map(icmp)} class="text-base-content/40">—</span> + </td> + <td class="text-xs"> + <.metrics_presence + device_id={device_id} + has_snmp={has_snmp} + has_sysmon={has_sysmon} + /> + </td> + <td class="font-mono text-xs">{Map.get(row, "poller_id") || "—"}</td> + <td class="font-mono text-xs"> + <.srql_cell col="last_seen" value={Map.get(row, "last_seen")} /> + </td> + </tr> + <% end %> + </tbody> + </table> + </div> + + <div class="mt-4 pt-4 border-t border-base-200"> + <.ui_pagination + prev_cursor={Map.get(@pagination, "prev_cursor")} + next_cursor={Map.get(@pagination, "next_cursor")} + base_path="/devices" + query={Map.get(@srql, :query, "")} + limit={@limit} + result_count={length(@devices)} + /> + </div> + </.ui_panel> + </div> + </Layouts.app> + """ + end + + attr :available, :any, default: nil + + def availability_badge(assigns) do + {label, variant} = + case assigns.available do + true -> {"Online", "success"} + false -> {"Offline", "error"} + _ -> {"Unknown", "ghost"} + end + + assigns = + assigns + |> assign(:label, label) + |> assign(:variant, variant) + + ~H""" + <.ui_badge variant={@variant} size="xs">{@label}</.ui_badge> + """ + end + + attr :spark, :map, required: true + + def icmp_sparkline(assigns) do + points = Map.get(assigns.spark, :points, []) + {stroke_path, area_path} = sparkline_smooth_paths(points) + + assigns = + assigns + |> assign(:points, points) + |> assign(:latest_ms, Map.get(assigns.spark, :latest_ms, 0.0)) + |> assign(:tone, Map.get(assigns.spark, :tone, "success")) + |> assign(:title, Map.get(assigns.spark, :title)) + |> assign(:stroke_path, stroke_path) + |> assign(:area_path, area_path) + |> assign(:stroke_color, tone_stroke(Map.get(assigns.spark, :tone, "success"))) + |> assign(:spark_id, "spark-#{:erlang.phash2(Map.get(assigns.spark, :title, ""))}") + + ~H""" + <div class="flex items-center gap-2"> + <div class="h-8 w-20 rounded-md bg-base-200/30 px-1 py-0.5 overflow-hidden"> + <svg viewBox="0 0 400 120" class="w-full h-full" preserveAspectRatio="none"> + <title>{@title || "ICMP latency"} + + + + + + + + + + +
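+          <%!-- Average ICMP latency for the last hour in 5m buckets; the adjacent value is the most recent sample (warning tone at >= 100ms). --%>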
+ {format_ms(@latest_ms)} +
+ + """ + end + + attr :device_id, :string, default: nil + attr :has_snmp, :boolean, default: false + attr :has_sysmon, :boolean, default: false + + def metrics_presence(assigns) do + device_path = + if is_binary(assigns.device_id) and String.trim(assigns.device_id) != "" do + ~p"/devices/#{assigns.device_id}" + else + nil + end + + assigns = assign(assigns, :device_path, device_path) + + ~H""" +
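+      <%!-- Presence icons: signal = SNMP metrics and cpu-chip = Sysmon metrics seen in the last 24h; each links to the device page when the device id is known. --%>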
+ <.link + :if={@has_snmp and is_binary(@device_path)} + navigate={@device_path} + class="tooltip inline-flex hover:opacity-90" + data-tip="SNMP metrics available (last 24h)" + aria-label="View device details (SNMP metrics available)" + > + <.icon name="hero-signal" class="size-4 text-info" /> + + + <.icon name="hero-signal" class="size-4 text-info" /> + + + <.link + :if={@has_sysmon and is_binary(@device_path)} + navigate={@device_path} + class="tooltip inline-flex hover:opacity-90" + data-tip="Sysmon metrics available (last 24h)" + aria-label="View device details (Sysmon metrics available)" + > + <.icon name="hero-cpu-chip" class="size-4 text-success" /> + + + <.icon name="hero-cpu-chip" class="size-4 text-success" /> + +
+ + """ + end + + defp tone_stroke("error"), do: "#ff5555" + defp tone_stroke("warning"), do: "#ffb86c" + defp tone_stroke("success"), do: "#50fa7b" + defp tone_stroke(_), do: "#6272a4" + + defp format_ms(value) when is_float(value) do + :erlang.float_to_binary(value, decimals: 1) <> "ms" + end + + defp format_ms(value) when is_integer(value), do: Integer.to_string(value) <> "ms" + defp format_ms(_), do: "—" + + # Generate smooth SVG paths using monotone cubic interpolation (Catmull-Rom spline) + defp sparkline_smooth_paths(values) when is_list(values) do + values = Enum.filter(values, &is_number/1) + + case {values, Enum.min(values, fn -> 0 end), Enum.max(values, fn -> 0 end)} do + {[], _, _} -> + {"", ""} + + {[_single], _, _} -> + # Single point - just draw a small line + {"M 200,60 L 200,60", ""} + + {_values, min_v, max_v} -> + # Normalize values to coordinates + range = if max_v == min_v, do: 1.0, else: max_v - min_v + len = length(values) + + coords = + Enum.with_index(values) + |> Enum.map(fn {v, idx} -> + x = idx_to_x(idx, len) + y = 110.0 - (v - min_v) / range * 100.0 + {x * 1.0, y} + end) + + stroke_path = monotone_curve_path(coords) + area_path = monotone_area_path(coords) + {stroke_path, area_path} + end + end + + defp sparkline_smooth_paths(_), do: {"", ""} + + # Monotone cubic interpolation for smooth curves that don't overshoot + defp monotone_curve_path([]), do: "" + defp monotone_curve_path([{x, y}]), do: "M #{fmt(x)},#{fmt(y)}" + + defp monotone_curve_path(coords) do + [{x0, y0} | _rest] = coords + tangents = compute_tangents(coords) + + # Start with first point + segments = ["M #{fmt(x0)},#{fmt(y0)}"] + + # Build cubic bezier segments + curve_segments = + Enum.zip([coords, tl(coords), tangents, tl(tangents)]) + |> Enum.map(fn {{x0, y0}, {x1, y1}, t0, t1} -> + dx = (x1 - x0) / 3.0 + cp1x = x0 + dx + cp1y = y0 + t0 * dx + cp2x = x1 - dx + cp2y = y1 - t1 * dx + "C #{fmt(cp1x)},#{fmt(cp1y)} #{fmt(cp2x)},#{fmt(cp2y)} #{fmt(x1)},#{fmt(y1)}" + end) + + Enum.join(segments ++ curve_segments, " ") + end + + defp monotone_area_path([]), do: "" + defp monotone_area_path([_]), do: "" + + defp monotone_area_path(coords) do + [{first_x, _} | _] = coords + {last_x, _} = List.last(coords) + baseline = 115.0 + + stroke = monotone_curve_path(coords) + "#{stroke} L #{fmt(last_x)},#{fmt(baseline)} L #{fmt(first_x)},#{fmt(baseline)} Z" + end + + # Compute tangents for monotone interpolation + defp compute_tangents(coords) when length(coords) < 2, do: [] + + defp compute_tangents(coords) do + # Compute slopes between consecutive points + slopes = + Enum.zip(coords, tl(coords)) + |> Enum.map(fn {{x0, y0}, {x1, y1}} -> + dx = x1 - x0 + if dx == 0, do: 0.0, else: (y1 - y0) / dx + end) + + # Compute tangents using monotone method + n = length(coords) + + Enum.map(0..(n - 1), fn i -> + cond do + i == 0 -> + # First point - use first slope + Enum.at(slopes, 0) || 0.0 + + i == n - 1 -> + # Last point - use last slope + List.last(slopes) || 0.0 + + true -> + # Interior points - average of adjacent slopes, clamped for monotonicity + s0 = Enum.at(slopes, i - 1) || 0.0 + s1 = Enum.at(slopes, i) || 0.0 + + if s0 * s1 <= 0 do + # Different signs - use 0 to avoid overshooting + 0.0 + else + # Same sign - use harmonic mean for smoothness + 2.0 * s0 * s1 / (s0 + s1) + end + end + end) + end + + defp fmt(num) when is_float(num), do: :erlang.float_to_binary(num, decimals: 1) + defp fmt(num) when is_integer(num), do: Integer.to_string(num) + + defp idx_to_x(_idx, 0), do: 0 + defp idx_to_x(0, _len), do: 0 + + 
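+  # Spread point indices evenly across the sparkline's 400-unit viewBox width,
+  # e.g. five points land at x = 0, 100, 200, 300 and 400.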
defp idx_to_x(idx, len) when len > 1 do + round(idx / (len - 1) * 400) + end + + defp load_icmp_sparklines(srql_module, devices) do + device_ids = + devices + |> Enum.filter(&is_map/1) + |> Enum.map(fn row -> Map.get(row, "device_id") || Map.get(row, "id") end) + |> Enum.filter(&is_binary/1) + |> Enum.map(&String.trim/1) + |> Enum.reject(&(&1 == "")) + |> Enum.uniq() + |> Enum.take(@sparkline_device_cap) + + if device_ids == [] do + {%{}, nil} + else + query = + [ + "in:timeseries_metrics", + "metric_type:icmp", + "device_id:(#{Enum.map_join(device_ids, ",", &escape_list_value/1)})", + "time:#{@sparkline_window}", + "bucket:#{@sparkline_bucket}", + "agg:avg", + "series:device_id", + "limit:#{min(length(device_ids) * @sparkline_points_per_device, 4000)}" + ] + |> Enum.join(" ") + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) -> + {build_icmp_sparklines(rows), nil} + + {:ok, other} -> + {%{}, "unexpected SRQL response: #{inspect(other)}"} + + {:error, reason} -> + {%{}, format_error(reason)} + end + end + end + + defp escape_list_value(value) when is_binary(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("\"", "\\\"") + |> then(&"\"#{&1}\"") + end + + defp load_metric_presence(srql_module, devices) do + device_ids = + devices + |> Enum.filter(&is_map/1) + |> Enum.map(fn row -> Map.get(row, "device_id") || Map.get(row, "id") end) + |> Enum.filter(&is_binary/1) + |> Enum.map(&String.trim/1) + |> Enum.reject(&(&1 == "")) + |> Enum.uniq() + |> Enum.take(@presence_device_cap) + + if device_ids == [] do + {%{}, %{}} + else + list = Enum.map_join(device_ids, ",", &escape_list_value/1) + limit = min(length(device_ids) * 3, 2000) + + snmp_query = + [ + "in:snmp_metrics", + "device_id:(#{list})", + "time:#{@presence_window}", + "bucket:#{@presence_bucket}", + "agg:count", + "series:device_id", + "limit:#{limit}" + ] + |> Enum.join(" ") + + sysmon_query = + [ + "in:cpu_metrics", + "device_id:(#{list})", + "time:#{@presence_window}", + "bucket:#{@presence_bucket}", + "agg:count", + "series:device_id", + "limit:#{limit}" + ] + |> Enum.join(" ") + + {snmp_presence, sysmon_presence} = + [snmp: snmp_query, sysmon: sysmon_query] + |> Task.async_stream( + fn {key, query} -> {key, srql_module.query(query)} end, + ordered: false, + timeout: 30_000 + ) + |> Enum.reduce({%{}, %{}}, fn + {:ok, {:snmp, {:ok, %{"results" => rows}}}}, {_snmp, sysmon} -> + {presence_from_downsample(rows), sysmon} + + {:ok, {:sysmon, {:ok, %{"results" => rows}}}}, {snmp, _sysmon} -> + {snmp, presence_from_downsample(rows)} + + _, acc -> + acc + end) + + {snmp_presence, sysmon_presence} + end + end + + defp presence_from_downsample(rows) when is_list(rows) do + rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + series = Map.get(row, "series") + value = Map.get(row, "value") + + if is_binary(series) and series != "" and is_number(value) and value > 0 do + Map.put(acc, series, true) + else + acc + end + end) + end + + defp presence_from_downsample(_), do: %{} + + defp build_icmp_sparklines(rows) when is_list(rows) do + rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + device_id = Map.get(row, "series") || Map.get(row, "device_id") + timestamp = Map.get(row, "timestamp") + value_ms = latency_ms(Map.get(row, "value")) + + if is_binary(device_id) and value_ms > 0 do + Map.update( + acc, + device_id, + [%{ts: timestamp, v: value_ms}], + fn existing -> existing ++ [%{ts: timestamp, v: value_ms}] end + ) + else + acc + end + end) + |> 
Map.new(fn {device_id, points} -> + points = + points + |> Enum.sort_by(fn p -> p.ts end) + |> Enum.take(-@sparkline_points_per_device) + + values = Enum.map(points, & &1.v) + latest_ms = List.last(values) || 0.0 + + tone = + cond do + latest_ms >= @sparkline_threshold_ms -> "warning" + latest_ms > 0 -> "success" + true -> "ghost" + end + + title = + case List.last(points) do + %{ts: ts} when is_binary(ts) -> "ICMP #{format_ms(latest_ms)} · #{ts}" + _ -> "ICMP #{format_ms(latest_ms)}" + end + + {device_id, %{points: values, latest_ms: latest_ms, tone: tone, title: title}} + end) + end + + defp build_icmp_sparklines(_), do: %{} + + defp latency_ms(value) when is_float(value) or is_integer(value) do + raw = if is_integer(value), do: value * 1.0, else: value + if raw > 1_000_000.0, do: raw / 1_000_000.0, else: raw + end + + defp latency_ms(value) when is_binary(value) do + case Float.parse(String.trim(value)) do + {parsed, ""} -> latency_ms(parsed) + _ -> 0.0 + end + end + + defp latency_ms(_), do: 0.0 + + defp format_error(%Jason.DecodeError{} = err), do: Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/device_live/show.ex b/web-ng/lib/serviceradar_web_ng_web/live/device_live/show.ex new file mode 100644 index 000000000..7bbe9d27d --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/device_live/show.ex @@ -0,0 +1,1260 @@ +defmodule ServiceRadarWebNGWeb.DeviceLive.Show do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + alias ServiceRadarWebNGWeb.Dashboard.Engine + alias ServiceRadarWebNGWeb.Dashboard.Plugins.Table, as: TablePlugin + + @default_limit 50 + @max_limit 200 + @metrics_limit 200 + @availability_window "last_24h" + @availability_bucket "30m" + + @impl true + def mount(_params, _session, socket) do + srql = %{ + enabled: true, + entity: "devices", + page_path: nil, + query: nil, + draft: nil, + error: nil, + viz: nil, + loading: false, + builder_available: false, + builder_open: false, + builder_supported: false, + builder_sync: false, + builder: %{} + } + + {:ok, + socket + |> assign(:page_title, "Device") + |> assign(:device_id, nil) + |> assign(:results, []) + |> assign(:panels, []) + |> assign(:metric_sections, []) + |> assign(:sysmon_summary, nil) + |> assign(:availability, nil) + |> assign(:healthcheck_summary, nil) + |> assign(:limit, @default_limit) + |> assign(:srql, srql)} + end + + @impl true + def handle_params(%{"device_id" => device_id} = params, uri, socket) do + limit = parse_limit(Map.get(params, "limit"), @default_limit, @max_limit) + + default_query = + "in:devices device_id:\"#{escape_value(device_id)}\" limit:#{limit}" + + query = + params + |> Map.get("q", default_query) + |> to_string() + |> String.trim() + |> case do + "" -> default_query + other -> other + end + + srql_module = srql_module() + + {results, error, viz} = + case srql_module.query(query) do + {:ok, %{"results" => results} = resp} when is_list(results) -> + viz = + case Map.get(resp, "viz") do + value when is_map(value) -> value + _ -> nil + end + + {results, nil, viz} + + {:ok, other} -> + {[], "unexpected SRQL response: #{inspect(other)}", nil} + + {:error, reason} -> + {[], "SRQL error: #{format_error(reason)}", nil} + end + + 
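+    # Keep the request path so srql_submit can push_patch back to this device page.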
page_path = uri |> to_string() |> URI.parse() |> Map.get(:path) + + srql = + socket.assigns.srql + |> Map.merge(%{ + entity: "devices", + page_path: page_path, + query: query, + draft: query, + error: error, + viz: viz, + loading: false + }) + + srql_response = %{"results" => results, "viz" => viz} + + metric_sections = load_metric_sections(srql_module, device_id) + sysmon_summary = load_sysmon_summary(srql_module, device_id) + availability = load_availability(srql_module, device_id) + healthcheck_summary = load_healthcheck_summary(srql_module, device_id) + + {:noreply, + socket + |> assign(:device_id, device_id) + |> assign(:limit, limit) + |> assign(:results, results) + |> assign(:panels, Engine.build_panels(srql_response)) + |> assign(:metric_sections, metric_sections) + |> assign(:sysmon_summary, sysmon_summary) + |> assign(:availability, availability) + |> assign(:healthcheck_summary, healthcheck_summary) + |> assign(:srql, srql)} + end + + @impl true + def handle_event("srql_change", %{"q" => q}, socket) do + {:noreply, assign(socket, :srql, Map.put(socket.assigns.srql, :draft, to_string(q)))} + end + + def handle_event("srql_submit", %{"q" => q}, socket) do + page_path = socket.assigns.srql[:page_path] || "/devices/#{socket.assigns.device_id}" + + query = + q + |> to_string() + |> String.trim() + |> case do + "" -> to_string(socket.assigns.srql[:query] || "") + other -> other + end + + {:noreply, + push_patch(socket, + to: page_path <> "?" <> URI.encode_query(%{"q" => query, "limit" => socket.assigns.limit}) + )} + end + + def handle_event(_event, _params, socket), do: {:noreply, socket} + + @impl true + def render(assigns) do + device_row = List.first(Enum.filter(assigns.results, &is_map/1)) + + assigns = + assigns + |> assign(:device_row, device_row) + |> assign( + :metric_sections_to_render, + Enum.filter(assigns.metric_sections, fn section -> + is_binary(Map.get(section, :error)) or Map.get(section, :panels, []) != [] + end) + ) + + ~H""" + +
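+    <%!-- Device detail layout: header, summary card, availability timeline, health checks, sysmon summary, per-metric sections, then auto-generated panels. --%>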
+ <.header> + Device + <:subtitle> + {@device_id} + + <:actions> + <.ui_button href={~p"/devices"} variant="ghost" size="sm">Back to devices + + + +
+
+ No device row returned for this query. +
+ +
+
+ <.kv_inline label="Hostname" value={Map.get(@device_row, "hostname")} /> + <.kv_inline label="IP" value={Map.get(@device_row, "ip")} mono /> + <.kv_inline label="Poller" value={Map.get(@device_row, "poller_id")} mono /> + <.kv_inline label="Last Seen" value={Map.get(@device_row, "last_seen")} mono /> + <.kv_inline label="OS" value={Map.get(@device_row, "os_info")} /> + <.kv_inline label="Version" value={Map.get(@device_row, "version_info")} /> +
+
+ + <.availability_section :if={is_map(@availability)} availability={@availability} /> + + <.healthcheck_section :if={is_map(@healthcheck_summary)} summary={@healthcheck_summary} /> + + <.sysmon_summary_section :if={is_map(@sysmon_summary)} summary={@sysmon_summary} /> + + <%= for section <- @metric_sections_to_render do %> +
+
+
+ {section.title} + {section.subtitle} +
+
+ +
+ {section.error} +
+ +
+              <%= for panel <- section.panels do %>
+                <.live_component
+                  module={panel.plugin}
+                  id={"device-#{@device_id}-#{section.key}-#{panel.id}"}
+                  title={section.title}
+                  panel_assigns={Map.put(panel.assigns, :compact, true)}
+                />
+              <% end %>
+
+ <% end %> + + <%= for panel <- @panels do %> + <%= if panel.plugin == TablePlugin and length(@results) == 1 and is_map(@device_row) do %> + <.device_properties_card row={@device_row} /> + <% else %> + <.live_component + module={panel.plugin} + id={"device-#{panel.id}"} + title={panel.title} + panel_assigns={panel.assigns} + /> + <% end %> + <% end %> +
+
+
+ """ + end + + attr :row, :map, required: true + + defp device_properties_card(assigns) do + row = assigns.row || %{} + + keys = + row + |> Map.keys() + |> Enum.map(&to_string/1) + |> Enum.uniq() + + # Exclude metadata and fields already shown in the header card + excluded = ["metadata", "hostname", "ip", "poller_id", "last_seen", "os_info", "version_info"] + keys = Enum.reject(keys, &(&1 in excluded)) + + # Order remaining keys nicely + preferred = [ + "device_id", + "agent_id", + "device_type", + "service_type", + "service_status", + "is_available", + "last_heartbeat" + ] + + {preferred_keys, other_keys} = + Enum.split_with(keys, fn k -> k in preferred end) + + ordered_keys = + preferred + |> Enum.filter(&(&1 in preferred_keys)) + |> Kernel.++(Enum.sort(other_keys)) + + # Only show if there are properties to display + assigns = + assigns + |> assign(:ordered_keys, ordered_keys) + |> assign(:row, row) + |> assign(:has_properties, ordered_keys != []) + + ~H""" +
+
+ Device Properties + {length(@ordered_keys)} fields +
+ +
+
+ <%= for key <- @ordered_keys do %> +
+ {format_label(key)}: + + {format_prop_value(Map.get(@row, key))} + +
+ <% end %> +
+
+
+ """ + end + + defp format_label(key) when is_binary(key) do + key + |> String.replace("_", " ") + |> String.split(" ") + |> Enum.map(&String.capitalize/1) + |> Enum.join(" ") + end + + defp format_label(key), do: to_string(key) + + defp format_prop_value(nil), do: "—" + defp format_prop_value(""), do: "—" + defp format_prop_value(true), do: "Yes" + defp format_prop_value(false), do: "No" + defp format_prop_value(value) when is_binary(value), do: String.slice(value, 0, 100) + defp format_prop_value(value) when is_number(value), do: to_string(value) + + defp format_prop_value(value) when is_list(value) or is_map(value) do + "#{map_size_or_length(value)} items" + end + + defp format_prop_value(value), do: inspect(value) |> String.slice(0, 50) + + defp map_size_or_length(value) when is_map(value), do: map_size(value) + defp map_size_or_length(value) when is_list(value), do: length(value) + defp map_size_or_length(_), do: 0 + + attr :label, :string, required: true + attr :value, :any, default: nil + attr :mono, :boolean, default: false + + def kv_inline(assigns) do + ~H""" +
+ {@label}: + + {format_value(@value)} + +
+ """ + end + + defp format_value(nil), do: "—" + defp format_value(""), do: "—" + defp format_value(v) when is_binary(v), do: v + defp format_value(v), do: to_string(v) + + defp parse_limit(nil, default, _max), do: default + + defp parse_limit(limit, default, max) when is_binary(limit) do + case Integer.parse(limit) do + {value, ""} -> parse_limit(value, default, max) + _ -> default + end + end + + defp parse_limit(limit, _default, max) when is_integer(limit) and limit > 0 do + min(limit, max) + end + + defp parse_limit(_limit, default, _max), do: default + + defp escape_value(value) when is_binary(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("\"", "\\\"") + end + + defp escape_value(other), do: escape_value(to_string(other)) + + defp load_metric_sections(srql_module, device_id) do + device_id = escape_value(device_id) + + [ + %{ + key: "cpu", + title: "CPU", + entity: "cpu_metrics", + series: nil, + subtitle: "last 24h · 5m buckets · avg across cores" + }, + %{ + key: "memory", + title: "Memory", + entity: "memory_metrics", + series: "partition", + subtitle: "last 24h · 5m buckets · avg" + }, + %{ + key: "disk", + title: "Disk", + entity: "disk_metrics", + series: "mount_point", + subtitle: "last 24h · 5m buckets · avg" + } + ] + |> Enum.map(fn spec -> + query = metric_query(spec.entity, device_id, spec.series) + + base = %{ + key: spec.key, + title: spec.title, + subtitle: spec.subtitle, + query: query, + panels: [], + error: nil + } + + case srql_module.query(query) do + {:ok, %{"results" => results} = resp} when is_list(results) and results != [] -> + viz = + case Map.get(resp, "viz") do + value when is_map(value) -> value + _ -> nil + end + + srql_response = %{"results" => results, "viz" => viz} + + panels = + srql_response + |> Engine.build_panels() + |> prefer_visual_panels(results) + + %{base | panels: panels} + + {:ok, %{"results" => results}} when is_list(results) -> + base + + {:ok, other} -> + %{base | error: "unexpected SRQL response: #{inspect(other)}"} + + {:error, reason} -> + %{base | error: "SRQL error: #{format_error(reason)}"} + end + end) + end + + defp prefer_visual_panels(panels, results) when is_list(panels) do + has_non_table? = Enum.any?(panels, &(&1.plugin != TablePlugin)) + + if results != [] and has_non_table? 
do + Enum.reject(panels, &(&1.plugin == TablePlugin)) + else + panels + end + end + + defp prefer_visual_panels(panels, _results), do: panels + + defp metric_query(entity, device_id_escaped, series_field) do + series_field = + case series_field do + nil -> nil + "" -> nil + other -> to_string(other) |> String.trim() + end + + tokens = + [ + "in:#{entity}", + "device_id:\"#{device_id_escaped}\"", + "time:last_24h", + "bucket:5m", + "agg:avg", + "sort:timestamp:desc", + "limit:#{@metrics_limit}" + ] + + tokens = + if is_binary(series_field) and series_field != "" do + List.insert_at(tokens, 5, "series:#{series_field}") + else + tokens + end + + Enum.join(tokens, " ") + end + + defp format_error(%Jason.DecodeError{} = err), do: Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + # --------------------------------------------------------------------------- + # Availability Section + # --------------------------------------------------------------------------- + + attr :availability, :map, required: true + + def availability_section(assigns) do + uptime_pct = Map.get(assigns.availability, :uptime_pct, 0.0) + total_checks = Map.get(assigns.availability, :total_checks, 0) + online_checks = Map.get(assigns.availability, :online_checks, 0) + offline_checks = Map.get(assigns.availability, :offline_checks, 0) + segments = Map.get(assigns.availability, :segments, []) + + assigns = + assigns + |> assign(:uptime_pct, uptime_pct) + |> assign(:total_checks, total_checks) + |> assign(:online_checks, online_checks) + |> assign(:offline_checks, offline_checks) + |> assign(:segments, segments) + + ~H""" +
+
+
+
+
Availability Timeline
+
+ Last 24h · each block = 30m bucket · green = online, red = offline +
+
+
+
{format_pct(@uptime_pct)}%
+
uptime (bucketed)
+
+
+
+ +
+
+
+ 24h ago + now +
+ +
+
+ <%= for {seg, idx} <- Enum.with_index(@segments) do %> +
+ <% end %> +
+
+ +
+
+
+ + {@online_checks} + online buckets +
+
+ + {@offline_checks} + offline buckets +
+
+
+ {@total_checks} total buckets +
+
+
+ +
+ No availability data found. +
+
+
+ """ + end + + defp format_pct(value) when is_float(value), do: :erlang.float_to_binary(value, decimals: 1) + defp format_pct(value) when is_integer(value), do: Integer.to_string(value) + defp format_pct(_), do: "—" + + # --------------------------------------------------------------------------- + # Sysmon Summary Section + # --------------------------------------------------------------------------- + + attr :summary, :map, required: true + + def sysmon_summary_section(assigns) do + cpu = Map.get(assigns.summary, :cpu, %{}) + memory = Map.get(assigns.summary, :memory, %{}) + disks = Map.get(assigns.summary, :disks, []) + icmp_rtt = Map.get(assigns.summary, :icmp_rtt) + + has_cpu = is_map(cpu) and not is_nil(Map.get(cpu, :timestamp)) + has_memory = is_map(memory) and not is_nil(Map.get(memory, :timestamp)) + + assigns = + assigns + |> assign(:cpu, cpu) + |> assign(:memory, memory) + |> assign(:disks, disks) + |> assign(:icmp_rtt, icmp_rtt) + |> assign(:has_cpu, has_cpu) + |> assign(:has_memory, has_memory) + + ~H""" +
+
+ System Metrics (Sysmon) +
+ +
+ <.metric_card + :if={@has_cpu} + title="CPU Usage" + value={format_pct(Map.get(@cpu, :avg_usage, 0.0))} + suffix="%" + subtitle={"#{Map.get(@cpu, :core_count, 0)} cores"} + icon="hero-cpu-chip" + color={cpu_color(Map.get(@cpu, :avg_usage, 0.0))} + /> + + <.metric_card + :if={@has_memory} + title="Memory" + value={format_pct(Map.get(@memory, :percent, 0.0))} + suffix="%" + subtitle={format_bytes(Map.get(@memory, :used_bytes, 0)) <> " / " <> format_bytes(Map.get(@memory, :total_bytes, 0))} + icon="hero-rectangle-stack" + color={memory_color(Map.get(@memory, :percent, 0.0))} + /> + + <.metric_card + :if={is_number(@icmp_rtt)} + title="Latency (ICMP)" + value={format_latency(@icmp_rtt)} + suffix="ms" + subtitle="Last check" + icon="hero-signal" + color={latency_color(@icmp_rtt)} + /> + + <.metric_card + :if={@disks != []} + title="Heaviest Disk" + value={format_pct(disk_max_percent(@disks))} + suffix="%" + subtitle={disk_max_mount(@disks)} + icon="hero-circle-stack" + color={disk_color(disk_max_percent(@disks))} + /> +
+ +
+
Disk Utilization
+
+ <%= for disk <- Enum.take(Enum.sort_by(@disks, & &1.percent, :desc), 5) do %> + <.disk_bar disk={disk} /> + <% end %> +
+
+
+ """ + end + + attr :title, :string, required: true + attr :value, :string, required: true + attr :suffix, :string, default: "" + attr :subtitle, :string, default: "" + attr :icon, :string, required: true + attr :color, :string, default: "primary" + + def metric_card(assigns) do + ~H""" +
+
+ <.icon name={@icon} class={["size-4", "text-#{@color}"]} /> + {@title} +
+
+ {@value} + {@suffix} +
+
{@subtitle}
+
+ """ + end + + attr :disk, :map, required: true + + def disk_bar(assigns) do + pct = Map.get(assigns.disk, :percent, 0.0) + mount = Map.get(assigns.disk, :mount_point, "?") + used = format_bytes(Map.get(assigns.disk, :used_bytes, 0)) + total = format_bytes(Map.get(assigns.disk, :total_bytes, 0)) + color = disk_color(pct) + + assigns = + assigns + |> assign(:pct, pct) + |> assign(:mount, mount) + |> assign(:used, used) + |> assign(:total, total) + |> assign(:color, color) + + ~H""" +
+
{@mount}
+
+
+
+
+ {format_pct(@pct)}% ({@used}) +
+
+ """ + end + + defp cpu_color(pct) when pct >= 90, do: "error" + defp cpu_color(pct) when pct >= 70, do: "warning" + defp cpu_color(_), do: "success" + + defp memory_color(pct) when pct >= 90, do: "error" + defp memory_color(pct) when pct >= 80, do: "warning" + defp memory_color(_), do: "info" + + defp disk_color(pct) when pct >= 90, do: "error" + defp disk_color(pct) when pct >= 80, do: "warning" + defp disk_color(_), do: "primary" + + defp latency_color(ms) when ms >= 200, do: "error" + defp latency_color(ms) when ms >= 100, do: "warning" + defp latency_color(_), do: "success" + + defp disk_max_percent([]), do: 0.0 + + defp disk_max_percent(disks), + do: Enum.max_by(disks, & &1.percent, fn -> %{percent: 0.0} end).percent + + defp disk_max_mount([]), do: "—" + + defp disk_max_mount(disks), + do: Enum.max_by(disks, & &1.percent, fn -> %{mount_point: "—"} end).mount_point + + defp format_bytes(bytes) when is_number(bytes) do + cond do + bytes >= 1_099_511_627_776 -> "#{Float.round(bytes / 1_099_511_627_776 * 1.0, 1)} TB" + bytes >= 1_073_741_824 -> "#{Float.round(bytes / 1_073_741_824 * 1.0, 1)} GB" + bytes >= 1_048_576 -> "#{Float.round(bytes / 1_048_576 * 1.0, 1)} MB" + bytes >= 1024 -> "#{Float.round(bytes / 1024 * 1.0, 1)} KB" + true -> "#{bytes} B" + end + end + + defp format_bytes(_), do: "—" + + defp format_latency(ms) when is_float(ms), do: :erlang.float_to_binary(ms, decimals: 1) + defp format_latency(ms) when is_integer(ms), do: Integer.to_string(ms) + defp format_latency(_), do: "—" + + # --------------------------------------------------------------------------- + # Data Loading Functions + # --------------------------------------------------------------------------- + + defp load_availability(srql_module, device_id) do + escaped_id = escape_value(device_id) + + query = + "in:timeseries_metrics metric_type:icmp device_id:\"#{escaped_id}\" " <> + "time:#{@availability_window} bucket:#{@availability_bucket} agg:count sort:timestamp:asc limit:100" + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) and rows != [] -> + build_availability(rows) + + _ -> + # Fallback: try healthcheck_results + fallback_query = + "in:healthcheck_results device_id:\"#{escaped_id}\" time:#{@availability_window} limit:200" + + case srql_module.query(fallback_query) do + {:ok, %{"results" => rows}} when is_list(rows) -> + build_availability_from_healthchecks(rows) + + _ -> + nil + end + end + end + + defp build_availability(rows) do + # Each row represents a bucket. If we got ICMP data, the device was online. + # This is a simplified availability based on metric presence. 
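# Illustrative example (hypothetical rows): with 48 buckets of which 44 carry a
# positive "value", online = 44, offline = 4, and
# uptime_pct = Float.round(44 / 48 * 100.0, 1) = 91.7; each timeline segment is
# assigned width 100.0 / 48 ≈ 2.08% of the bar.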
+ total = length(rows) + + online = + Enum.count(rows, fn r -> + is_map(r) and is_number(Map.get(r, "value")) and Map.get(r, "value") > 0 + end) + + offline = total - online + + uptime_pct = if total > 0, do: Float.round(online / total * 100.0, 1), else: 0.0 + + segments = + rows + |> Enum.filter(&is_map/1) + |> Enum.map(fn r -> + value = Map.get(r, "value") + ts = Map.get(r, "timestamp", "") + available = is_number(value) and value > 0 + + %{ + available: available, + width: 100.0 / max(length(rows), 1), + title: "#{ts} - #{if available, do: "Online", else: "Offline"}" + } + end) + + %{ + uptime_pct: uptime_pct, + total_checks: total, + online_checks: online, + offline_checks: offline, + segments: segments + } + end + + defp build_availability_from_healthchecks(rows) do + total = length(rows) + + online = + Enum.count(rows, fn r -> + is_map(r) and (Map.get(r, "is_available") == true or Map.get(r, "available") == true) + end) + + offline = total - online + + uptime_pct = if total > 0, do: Float.round(online / total * 100.0, 1), else: 0.0 + + # Build segments (group by time buckets if we have timestamps) + segments = + rows + |> Enum.filter(&is_map/1) + # Limit segments for display + |> Enum.take(48) + |> Enum.map(fn r -> + available = Map.get(r, "is_available") == true or Map.get(r, "available") == true + ts = Map.get(r, "timestamp") || Map.get(r, "checked_at", "") + + %{ + available: available, + width: 100.0 / max(min(length(rows), 48), 1), + title: "#{ts} - #{if available, do: "Online", else: "Offline"}" + } + end) + + %{ + uptime_pct: uptime_pct, + total_checks: total, + online_checks: online, + offline_checks: offline, + segments: segments + } + end + + defp load_sysmon_summary(srql_module, device_id) do + escaped_id = escape_value(device_id) + + # Load CPU, Memory, Disk metrics in parallel (conceptually - in sequence here) + cpu_data = load_cpu_summary(srql_module, escaped_id) + memory_data = load_memory_summary(srql_module, escaped_id) + disk_data = load_disk_summary(srql_module, escaped_id) + icmp_rtt = load_icmp_rtt(srql_module, escaped_id) + + has_sysmon_metrics = is_map(cpu_data) or is_map(memory_data) or disk_data != [] + + if has_sysmon_metrics do + %{ + cpu: cpu_data || %{}, + memory: memory_data || %{}, + disks: disk_data || [], + icmp_rtt: icmp_rtt + } + else + nil + end + end + + defp load_cpu_summary(srql_module, escaped_id) do + query = "in:cpu_metrics device_id:\"#{escaped_id}\" sort:timestamp:desc limit:64" + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) and rows != [] -> + # Get unique cores and calculate average + values = + Enum.map(rows, fn r -> extract_numeric(Map.get(r, "value")) end) + |> Enum.filter(&is_number/1) + + cores = + rows + |> Enum.map(fn r -> Map.get(r, "core") || Map.get(r, "cpu_core") end) + |> Enum.filter(&is_binary/1) + |> Enum.uniq() + |> length() + + avg = if values != [], do: Enum.sum(values) / length(values), else: 0.0 + + %{ + avg_usage: Float.round(avg * 1.0, 1), + core_count: max(cores, 1), + timestamp: Map.get(List.first(rows), "timestamp") + } + + _ -> + nil + end + end + + defp load_memory_summary(srql_module, escaped_id) do + query = "in:memory_metrics device_id:\"#{escaped_id}\" sort:timestamp:desc limit:4" + + case srql_module.query(query) do + {:ok, %{"results" => [row | _]}} when is_map(row) -> + used = extract_numeric(Map.get(row, "used_bytes") || Map.get(row, "value")) + total = extract_numeric(Map.get(row, "total_bytes")) + + # Calculate percent if we have both + pct = + cond do + 
is_number(Map.get(row, "percent")) -> Map.get(row, "percent") + is_number(used) and is_number(total) and total > 0 -> used / total * 100.0 + true -> 0.0 + end + + %{ + used_bytes: used || 0, + total_bytes: total || 0, + percent: Float.round(pct * 1.0, 1), + timestamp: Map.get(row, "timestamp") + } + + _ -> + nil + end + end + + defp load_disk_summary(srql_module, escaped_id) do + query = "in:disk_metrics device_id:\"#{escaped_id}\" sort:timestamp:desc limit:24" + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) and rows != [] -> + # Group by mount point and take the latest for each + rows + |> Enum.filter(&is_map/1) + |> Enum.group_by(fn r -> + Map.get(r, "mount_point") || Map.get(r, "mount") || "unknown" + end) + |> Enum.map(fn {mount, disk_rows} -> + latest = List.first(disk_rows) + used = extract_numeric(Map.get(latest, "used_bytes") || Map.get(latest, "value")) + total = extract_numeric(Map.get(latest, "total_bytes")) + + pct = + cond do + is_number(Map.get(latest, "percent")) -> Map.get(latest, "percent") + is_number(used) and is_number(total) and total > 0 -> used / total * 100.0 + true -> 0.0 + end + + %{ + mount_point: mount, + used_bytes: used || 0, + total_bytes: total || 0, + percent: Float.round(pct * 1.0, 1) + } + end) + |> Enum.sort_by(& &1.percent, :desc) + + _ -> + [] + end + end + + defp load_icmp_rtt(srql_module, escaped_id) do + query = + "in:timeseries_metrics metric_type:icmp device_id:\"#{escaped_id}\" sort:timestamp:desc limit:1" + + case srql_module.query(query) do + {:ok, %{"results" => [row | _]}} when is_map(row) -> + value = extract_numeric(Map.get(row, "value")) + + # Convert from nanoseconds if value is very large + if is_number(value) do + if value > 1_000_000.0, + do: Float.round(value / 1_000_000.0, 2), + else: Float.round(value * 1.0, 2) + else + nil + end + + _ -> + nil + end + end + + defp extract_numeric(value) when is_number(value), do: value + + defp extract_numeric(value) when is_binary(value) do + case Float.parse(String.trim(value)) do + {num, ""} -> num + _ -> nil + end + end + + defp extract_numeric(_), do: nil + + # --------------------------------------------------------------------------- + # Healthcheck Section (GRPC/Service Health) + # --------------------------------------------------------------------------- + + attr :summary, :map, required: true + + def healthcheck_section(assigns) do + services = Map.get(assigns.summary, :services, []) + total = Map.get(assigns.summary, :total, 0) + available = Map.get(assigns.summary, :available, 0) + unavailable = Map.get(assigns.summary, :unavailable, 0) + uptime_pct = if total > 0, do: Float.round(available / total * 100.0, 1), else: 0.0 + + assigns = + assigns + |> assign(:services, services) + |> assign(:total, total) + |> assign(:available, available) + |> assign(:unavailable, unavailable) + |> assign(:uptime_pct, uptime_pct) + + ~H""" +
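A quick sketch of the numeric parsing and unit heuristic above (editor's illustration; these are private helpers, so the values are shown as expected results rather than a literal REPL session):

```elixir
# extract_numeric/1 only accepts clean numeric strings:
extract_numeric("12.5")    # => 12.5
extract_numeric("12.5 ms") # => nil (trailing text fails the Float.parse check)

# load_icmp_rtt/2 assumes values above 1_000_000 are nanoseconds and converts them to ms:
# raw value 2_500_000 -> 2.5   (ms)
# raw value 850.0     -> 850.0 (already treated as ms)
```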
+
+ Service Health (GRPC) +
+
+ + {@available} + healthy +
+
+ + {@unavailable} + unhealthy +
+
+
+ +
+
+ No service health data available. +
+ +
+ <%= for svc <- Enum.take(@services, 10) do %> + <.healthcheck_row service={svc} /> + <% end %> +
+
+
+ """ + end + + attr :service, :map, required: true + + defp healthcheck_row(assigns) do + svc = assigns.service + available = Map.get(svc, :available, false) + service_name = Map.get(svc, :service_name, "Unknown") + service_type = Map.get(svc, :service_type, "") + message = Map.get(svc, :message, "") + timestamp = Map.get(svc, :timestamp, "") + + assigns = + assigns + |> assign(:available, available) + |> assign(:service_name, service_name) + |> assign(:service_type, service_type) + |> assign(:message, message) + |> assign(:timestamp, timestamp) + + ~H""" +
+
+
+
+ {@service_name} + + {@service_type} + +
+
{@message}
+
+
+ {format_healthcheck_time(@timestamp)} +
+
+ """ + end + + defp format_healthcheck_time(nil), do: "" + defp format_healthcheck_time(""), do: "" + + defp format_healthcheck_time(ts) when is_binary(ts) do + case DateTime.from_iso8601(ts) do + {:ok, dt, _} -> Calendar.strftime(dt, "%H:%M:%S") + _ -> ts + end + end + + defp format_healthcheck_time(_), do: "" + + defp load_healthcheck_summary(srql_module, device_id) do + case parse_service_device_id(device_id) do + {:service, "checker", checker_id} -> + case parse_checker_identity(checker_id) do + {:ok, service_name, agent_id} -> + query = + "in:services " <> + "service_name:\"#{escape_value(service_name)}\" " <> + "agent_id:\"#{escape_value(agent_id)}\" " <> + "time:last_24h sort:timestamp:desc limit:200" + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) and rows != [] -> + build_healthcheck_summary(rows) + + _ -> + nil + end + + :error -> + nil + end + + {:service, "agent", agent_id} -> + query = + "in:services " <> + "agent_id:\"#{escape_value(agent_id)}\" " <> + "time:last_24h sort:timestamp:desc limit:200" + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) and rows != [] -> + build_healthcheck_summary(rows) + + _ -> + nil + end + + {:service, "poller", poller_id} -> + query = + "in:services " <> + "poller_id:\"#{escape_value(poller_id)}\" " <> + "time:last_24h sort:timestamp:desc limit:200" + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) and rows != [] -> + build_healthcheck_summary(rows) + + _ -> + nil + end + + {:service, _service_type, service_id} -> + # Best-effort fallback: show any recent service_status rows matching this service_id as service_name. + query = + "in:services " <> + "service_name:\"#{escape_value(service_id)}\" " <> + "time:last_24h sort:timestamp:desc limit:200" + + case srql_module.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) and rows != [] -> + build_healthcheck_summary(rows) + + _ -> + nil + end + + _ -> + nil + end + end + + defp parse_service_device_id(device_id) when is_binary(device_id) do + case String.split(device_id, ":", parts: 3) do + ["serviceradar", service_type, service_id] when service_type != "" and service_id != "" -> + {:service, service_type, service_id} + + _ -> + :non_service + end + end + + defp parse_service_device_id(_), do: :non_service + + defp parse_checker_identity(checker_id) when is_binary(checker_id) do + case String.split(checker_id, "@", parts: 2) do + [service_name, agent_id] when service_name != "" and agent_id != "" -> + {:ok, service_name, agent_id} + + _ -> + :error + end + end + + defp parse_checker_identity(_), do: :error + + defp build_healthcheck_summary(rows) do + # Group by service_name and take most recent status for each + services_by_name = + rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + service_name = Map.get(row, "service_name") || "Unknown" + # Keep first (most recent) per service + Map.put_new(acc, service_name, row) + end) + + services = + services_by_name + |> Map.values() + |> Enum.map(fn row -> + %{ + service_name: Map.get(row, "service_name") || "Unknown", + service_type: Map.get(row, "service_type") || "", + available: Map.get(row, "available") == true, + message: Map.get(row, "message") || "", + timestamp: Map.get(row, "timestamp") || "" + } + end) + |> Enum.sort_by(fn s -> {s.available, s.service_name} end) + + available_count = Enum.count(services, & &1.available) + unavailable_count = length(services) - available_count + + %{ + services: services, 
+ total: length(services), + available: available_count, + unavailable: unavailable_count + } + end + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/event_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/event_live/index.ex new file mode 100644 index 000000000..c829438f1 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/event_live/index.ex @@ -0,0 +1,380 @@ +defmodule ServiceRadarWebNGWeb.EventLive.Index do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + alias Phoenix.LiveView.JS + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + @default_limit 20 + @max_limit 100 + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Events") + |> assign(:events, []) + |> assign(:summary, %{total: 0, critical: 0, high: 0, medium: 0, low: 0}) + |> assign(:limit, @default_limit) + |> SRQLPage.init("events", default_limit: @default_limit)} + end + + @impl true + def handle_params(params, uri, socket) do + socket = + socket + |> SRQLPage.load_list(params, uri, :events, + default_limit: @default_limit, + max_limit: @max_limit + ) + + # Compute summary from current page results + summary = compute_summary(socket.assigns.events) + + {:noreply, assign(socket, :summary, summary)} + end + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def handle_event("srql_submit", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_submit", params, fallback_path: "/events")} + end + + def handle_event("srql_builder_toggle", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: "events")} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_run", %{}, fallback_path: "/events")} + end + + def handle_event("srql_builder_add_filter", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_add_filter", params, entity: "events")} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, entity: "events")} + end + + @impl true + def render(assigns) do + pagination = get_in(assigns, [:srql, :pagination]) || %{} + assigns = assign(assigns, :pagination, pagination) + + ~H""" + +
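`handle_params/3` recomputes the severity roll-up from the rows on the current page via `compute_summary/1` (defined near the end of this module); a sketch with hypothetical events:

```elixir
compute_summary([
  %{"severity" => "Critical"},
  %{"severity" => "error"},   # "error"/"fatal" are counted in the :critical bucket
  %{"severity" => "warning"}, # "warn"/"warning" land in :high
  %{"severity" => nil}        # unknown severities only increment :total
])
# => %{total: 4, critical: 2, high: 1, medium: 0, low: 0}
```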
+
+ <.event_summary summary={@summary} /> + + <.ui_panel> + <:header> +
+
Event Stream
+
+ Click any event to view full details. +
+
+ + + <.events_table id="events" events={@events} /> + +
+ <.ui_pagination + prev_cursor={Map.get(@pagination, "prev_cursor")} + next_cursor={Map.get(@pagination, "next_cursor")} + base_path="/events" + query={Map.get(@srql, :query, "")} + limit={@limit} + result_count={length(@events)} + /> +
+ +
+
+
+ """ + end + + attr :summary, :map, required: true + + defp event_summary(assigns) do + total = assigns.summary.total + critical = assigns.summary.critical + high = assigns.summary.high + medium = assigns.summary.medium + low = assigns.summary.low + + assigns = + assigns + |> assign(:total, total) + |> assign(:critical, critical) + |> assign(:high, high) + |> assign(:medium, medium) + |> assign(:low, low) + + ~H""" +
+
+
+ Event Severity Breakdown +
+
+ <.link patch={~p"/events"} class="btn btn-ghost btn-xs">All Events + <.link + patch={ + ~p"/events?#{%{q: "in:events severity:(Critical,High) time:last_24h sort:event_timestamp:desc"}}" + } + class="btn btn-ghost btn-xs text-error" + > + Critical/High + +
+
+
+ <.severity_stat + label="Critical" + count={@critical} + total={@total} + color="error" + severity="Critical" + /> + <.severity_stat label="High" count={@high} total={@total} color="warning" severity="High" /> + <.severity_stat label="Medium" count={@medium} total={@total} color="info" severity="Medium" /> + <.severity_stat label="Low" count={@low} total={@total} color="success" severity="Low" /> +
+
+ """ + end + + attr :label, :string, required: true + attr :count, :integer, required: true + attr :total, :integer, required: true + attr :color, :string, required: true + attr :severity, :string, required: true + + defp severity_stat(assigns) do + pct = if assigns.total > 0, do: round(assigns.count / assigns.total * 100), else: 0 + query = "in:events severity:#{assigns.severity} time:last_24h sort:event_timestamp:desc" + + assigns = + assigns + |> assign(:pct, pct) + |> assign(:query, query) + + ~H""" + <.link + patch={~p"/events?#{%{q: @query}}"} + class="rounded-lg bg-base-200/50 p-3 hover:bg-base-200 transition-colors cursor-pointer group" + > +
+ {@label} + {@pct}% +
+
{@count}
+
+
+
+ + """ + end + + defp color_class("error"), do: "text-error" + defp color_class("warning"), do: "text-warning" + defp color_class("info"), do: "text-info" + defp color_class("success"), do: "text-success" + defp color_class(_), do: "text-base-content" + + defp color_bg("error"), do: "bg-error" + defp color_bg("warning"), do: "bg-warning" + defp color_bg("info"), do: "bg-info" + defp color_bg("success"), do: "bg-success" + defp color_bg(_), do: "bg-base-content" + + attr :id, :string, required: true + attr :events, :list, default: [] + + defp events_table(assigns) do + ~H""" +
+ + + + + + + + + + + + + + + <%= for {event, idx} <- Enum.with_index(@events) do %> + + + + + + + <% end %> + +
+ Time + + Severity + + Source + + Message +
+ No events found. +
+ {format_timestamp(event)} + + <.severity_badge value={Map.get(event, "severity")} /> + + {event_source(event)} + + {event_message(event)} +
+
+ """ + end + + attr :value, :any, default: nil + + defp severity_badge(assigns) do + variant = + case normalize_severity(assigns.value) do + s when s in ["critical", "fatal", "error"] -> "error" + s when s in ["high", "warn", "warning"] -> "warning" + s when s in ["medium", "info"] -> "info" + s when s in ["low", "debug", "ok"] -> "success" + _ -> "ghost" + end + + label = + case assigns.value do + nil -> "—" + "" -> "—" + v when is_binary(v) -> v + v -> to_string(v) + end + + assigns = assign(assigns, :variant, variant) |> assign(:label, label) + + ~H""" + <.ui_badge variant={@variant} size="xs">{@label} + """ + end + + defp normalize_severity(nil), do: "" + defp normalize_severity(v) when is_binary(v), do: v |> String.trim() |> String.downcase() + defp normalize_severity(v), do: v |> to_string() |> normalize_severity() + + defp event_id(event) do + Map.get(event, "id") || Map.get(event, "event_id") || "unknown" + end + + defp format_timestamp(event) do + ts = Map.get(event, "event_timestamp") || Map.get(event, "timestamp") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S") + _ -> ts || "—" + end + end + + defp parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error + + defp event_source(event) do + # Try various source fields in order of preference + source = + Map.get(event, "host") || + Map.get(event, "source") || + Map.get(event, "device_id") || + Map.get(event, "subject") + + case source do + nil -> "—" + "" -> "—" + v when is_binary(v) -> v + v -> to_string(v) + end + end + + defp event_message(event) do + # Try various message fields in order of preference + message = + Map.get(event, "short_message") || + Map.get(event, "message") || + Map.get(event, "subject") || + Map.get(event, "description") + + case message do + nil -> "—" + "" -> "—" + v when is_binary(v) -> String.slice(v, 0, 200) + v -> v |> to_string() |> String.slice(0, 200) + end + end + + # Compute summary stats from events + defp compute_summary(events) when is_list(events) do + initial = %{total: 0, critical: 0, high: 0, medium: 0, low: 0} + + Enum.reduce(events, initial, fn event, acc -> + severity = normalize_severity(Map.get(event, "severity")) + + updated = + case severity do + s when s in ["critical", "fatal", "error"] -> Map.update!(acc, :critical, &(&1 + 1)) + s when s in ["high", "warn", "warning"] -> Map.update!(acc, :high, &(&1 + 1)) + s when s in ["medium", "info"] -> Map.update!(acc, :medium, &(&1 + 1)) + s when s in ["low", "debug", "ok"] -> Map.update!(acc, :low, &(&1 + 1)) + _ -> acc + end + + Map.update!(updated, :total, &(&1 + 1)) + end) + end + + defp compute_summary(_), do: %{total: 0, critical: 0, high: 0, medium: 0, low: 0} +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/event_live/show.ex b/web-ng/lib/serviceradar_web_ng_web/live/event_live/show.ex new file mode 100644 index 000000000..b3e9f4fb5 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/event_live/show.ex @@ -0,0 +1,461 @@ +defmodule ServiceRadarWebNGWeb.EventLive.Show do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + @impl true + def mount(_params, _session, 
socket) do + {:ok, + socket + |> assign(:page_title, "Event Details") + |> assign(:event_id, nil) + |> assign(:event, nil) + |> assign(:error, nil) + |> assign(:srql, %{enabled: false})} + end + + @impl true + def handle_params(%{"event_id" => event_id}, _uri, socket) do + query = + "in:events id:\"#{escape_value(event_id)}\" sort:event_timestamp:desc limit:1" + + {event, error} = + case srql_module().query(query) do + {:ok, %{"results" => [event | _]}} when is_map(event) -> + {event, nil} + + {:ok, %{"results" => []}} -> + {nil, "Event not found. Note: Event detail view requires event_id field support."} + + {:ok, _other} -> + {nil, "Unexpected response format"} + + {:error, reason} -> + error_msg = format_error(reason) + + if String.contains?(error_msg, "unsupported filter") do + {nil, + "Event detail view is not available - the events entity does not support filtering by id."} + else + {nil, "Failed to load event: #{error_msg}"} + end + end + + {:noreply, + socket + |> assign(:event_id, event_id) + |> assign(:event, event) + |> assign(:error, error)} + end + + @impl true + def render(assigns) do + ~H""" + +
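The lookup above interpolates the raw event id into an SRQL filter, relying on `escape_value/1` (defined below in this module) to escape embedded quotes and backslashes first; for a hypothetical id the issued query looks like:

```elixir
# For event_id "evt-42", handle_params issues:
"in:events id:\"evt-42\" sort:event_timestamp:desc limit:1"
```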
+ <.header> + Event Details + <:subtitle> + {@event_id} + + <:actions> + <.ui_button href={~p"/events"} variant="ghost" size="sm"> + Back to events + + + + +
+

{@error}

+
+ +
+ <.event_summary event={@event} /> + <.event_details event={@event} /> +
+
+
+ """ + end + + attr :event, :map, required: true + + defp event_summary(assigns) do + ~H""" +
+
+
+ Severity + <.severity_badge value={Map.get(@event, "severity")} /> +
+ +
+ Time + {format_timestamp(@event)} +
+ +
+ Host + {Map.get(@event, "host")} +
+ +
+ Source + {Map.get(@event, "source")} +
+
+ +
+ Message +

{Map.get(@event, "short_message")}

+
+ +
+ + Full Message + +

+ {Map.get(@event, "message")} +

+
+
+ """ + end + + attr :event, :map, required: true + + defp event_details(assigns) do + # Fields already shown in summary + summary_fields = + ~w(id event_id severity event_timestamp timestamp host source short_message message) + + # CloudEvents metadata fields (show separately) + cloudevents_fields = ~w(specversion datacontenttype type) + + # Get nested data if present + data = Map.get(assigns.event, "data", %{}) + has_data = is_map(data) and map_size(data) > 0 + + # Get CloudEvents metadata + ce_fields = + assigns.event + |> Map.take(cloudevents_fields) + |> Enum.reject(fn {_k, v} -> is_nil(v) or v == "" end) + |> Enum.sort_by(fn {k, _v} -> cloudevents_order(k) end) + + # Other fields (not summary, not CloudEvents, not data) + other_fields = + assigns.event + |> Map.keys() + |> Enum.reject(&(&1 in summary_fields or &1 in cloudevents_fields or &1 == "data")) + |> Enum.sort() + + assigns = + assigns + |> assign(:ce_fields, ce_fields) + |> assign(:other_fields, other_fields) + |> assign(:data, data) + |> assign(:has_data, has_data) + + ~H""" + <%!-- CloudEvents Metadata --%> +
+ + Event Metadata + +
+ <%= for {field, value} <- @ce_fields do %> +
+ {field_label(field)} + {value} +
+ <% end %> +
+
+ + <%!-- Event Data Payload --%> +
+ + Event Payload + +
+ <%= for {field, value} <- Enum.sort(@data) do %> +
+ {field_label(field)} + <.inline_value value={value} /> +
+ <% end %> +
+
+ + <%!-- Other Fields --%> +
+ + Additional Fields + +
+ <%= for field <- @other_fields do %> +
+ {field_label(field)} + <.inline_value value={Map.get(@event, field)} /> +
+ <% end %> +
+
+ """ + end + + # Render values inline (not in pre blocks) + attr :value, :any, default: nil + + defp inline_value(%{value: nil} = assigns) do + ~H|| + end + + defp inline_value(%{value: ""} = assigns) do + ~H|| + end + + defp inline_value(%{value: value} = assigns) when is_boolean(value) do + ~H|{to_string(@value)}| + end + + defp inline_value(%{value: value} = assigns) when is_number(value) do + ~H|{to_string(@value)}| + end + + defp inline_value(%{value: value} = assigns) when is_map(value) or is_list(value) do + # For nested objects, show a compact summary + summary = + case value do + m when is_map(m) -> "{#{map_size(m)} fields}" + l when is_list(l) -> "[#{length(l)} items]" + end + + assigns = assign(assigns, :summary, summary) + + ~H|{@summary}| + end + + defp inline_value(%{value: value} = assigns) when is_binary(value) do + # Truncate long values + display = + if String.length(value) > 100 do + String.slice(value, 0, 100) <> "…" + else + value + end + + assigns = assign(assigns, :display, display) + + ~H|{@display}| + end + + defp inline_value(assigns) do + ~H|{to_string(@value)}| + end + + # CloudEvents field ordering + defp cloudevents_order("specversion"), do: 0 + defp cloudevents_order("type"), do: 1 + defp cloudevents_order("datacontenttype"), do: 2 + defp cloudevents_order(_), do: 99 + + attr :value, :any, default: nil + + defp format_value(%{value: nil} = assigns) do + ~H|| + end + + defp format_value(%{value: ""} = assigns) do + ~H|| + end + + defp format_value(%{value: value} = assigns) when is_boolean(value) do + ~H""" + <.ui_badge variant={if @value, do: "success", else: "error"} size="xs"> + {to_string(@value)} + + """ + end + + defp format_value(%{value: value} = assigns) when is_map(value) or is_list(value) do + formatted = Jason.encode!(value, pretty: true) + assigns = assign(assigns, :formatted, formatted) + + ~H""" +
{@formatted}
+ """ + end + + defp format_value(%{value: value} = assigns) when is_binary(value) do + # Check if it looks like JSON + if String.starts_with?(value, "{") or String.starts_with?(value, "[") do + case Jason.decode(value) do + {:ok, decoded} -> + formatted = Jason.encode!(decoded, pretty: true) + assigns = assign(assigns, :formatted, formatted) + + ~H""" +
{@formatted}
+ """ + + {:error, _} -> + ~H""" + {@value} + """ + end + else + ~H""" + {@value} + """ + end + end + + defp format_value(assigns) do + ~H""" + {to_string(@value)} + """ + end + + attr :value, :any, default: nil + + defp severity_badge(assigns) do + variant = + case normalize_severity(assigns.value) do + s when s in ["critical", "fatal", "error"] -> "error" + s when s in ["high", "warn", "warning"] -> "warning" + s when s in ["medium", "info"] -> "info" + s when s in ["low", "debug", "ok"] -> "success" + _ -> "ghost" + end + + label = + case assigns.value do + nil -> "—" + "" -> "—" + v when is_binary(v) -> v + v -> to_string(v) + end + + assigns = assign(assigns, :variant, variant) |> assign(:label, label) + + ~H""" + <.ui_badge variant={@variant} size="sm">{@label} + """ + end + + defp normalize_severity(nil), do: "" + defp normalize_severity(v) when is_binary(v), do: v |> String.trim() |> String.downcase() + defp normalize_severity(v), do: v |> to_string() |> normalize_severity() + + defp format_timestamp(event) do + ts = Map.get(event, "event_timestamp") || Map.get(event, "timestamp") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC") + _ -> ts || "—" + end + end + + defp parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error + + defp has_value?(map, key) do + case Map.get(map, key) do + nil -> false + "" -> false + _ -> true + end + end + + # Known field label mappings + @field_labels %{ + # CloudEvents + "specversion" => "Spec Version", + "datacontenttype" => "Content Type", + "type" => "Event Type", + # Common fields + "_remote_addr" => "Remote Address", + "short_message" => "Message", + "timestamp" => "Timestamp", + "event_timestamp" => "Event Time", + "created_at" => "Created At", + "updated_at" => "Updated At", + "trace_id" => "Trace ID", + "span_id" => "Span ID", + "http_method" => "HTTP Method", + "http_route" => "HTTP Route", + "http_status_code" => "Status Code", + "grpc_service" => "gRPC Service", + "grpc_method" => "gRPC Method", + "grpc_status_code" => "gRPC Status", + "service_name" => "Service", + "host" => "Host", + "level" => "Level", + "severity" => "Severity", + "version" => "Version" + } + + defp field_label(field) when is_binary(field) do + case Map.get(@field_labels, field) do + nil -> humanize_field(field) + label -> label + end + end + + defp field_label(field), do: to_string(field) + + defp humanize_field(field) when is_binary(field) do + field + |> String.replace("_", " ") + |> String.split() + |> Enum.map(&String.capitalize/1) + |> Enum.join(" ") + end + + defp humanize_field(field), do: to_string(field) + + defp escape_value(value) when is_binary(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("\"", "\\\"") + end + + defp escape_value(other), do: escape_value(to_string(other)) + + defp format_error(%Jason.DecodeError{} = err), do: Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, 
ServiceRadarWebNG.SRQL) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/interface_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/interface_live/index.ex new file mode 100644 index 000000000..fe7580a46 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/interface_live/index.ex @@ -0,0 +1,117 @@ +defmodule ServiceRadarWebNGWeb.InterfaceLive.Index do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + @default_limit 20 + @max_limit 100 + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Interfaces") + |> assign(:interfaces, []) + |> assign(:limit, @default_limit) + |> SRQLPage.init("interfaces", default_limit: @default_limit)} + end + + @impl true + def handle_params(params, uri, socket) do + {:noreply, + socket + |> SRQLPage.load_list(params, uri, :interfaces, + default_limit: @default_limit, + max_limit: @max_limit + )} + end + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def handle_event("srql_submit", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_submit", params, fallback_path: "/interfaces")} + end + + def handle_event("srql_builder_toggle", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: "interfaces")} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_run", %{}, fallback_path: "/interfaces")} + end + + def handle_event("srql_builder_add_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_add_filter", params, entity: "interfaces")} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, entity: "interfaces")} + end + + @impl true + def render(assigns) do + pagination = get_in(assigns, [:srql, :pagination]) || %{} + assigns = assign(assigns, :pagination, pagination) + + ~H""" + +
+ <.header> + Interfaces + <:subtitle>Network interface inventory. + <:actions> + <.ui_button variant="ghost" size="sm" patch={~p"/interfaces"}> + Reset + + + + + <.ui_panel> + <:header> +
+
Network Interfaces
+
+ Interface details and status. +
+
+ + + <.srql_results_table + id="interfaces" + rows={@interfaces} + container={false} + empty_message="No interfaces found." + /> + +
+ <.ui_pagination + prev_cursor={Map.get(@pagination, "prev_cursor")} + next_cursor={Map.get(@pagination, "next_cursor")} + base_path="/interfaces" + query={Map.get(@srql, :query, "")} + limit={@limit} + result_count={length(@interfaces)} + /> +
+ +
+
+ """ + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/log_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/log_live/index.ex new file mode 100644 index 000000000..82d337dcf --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/log_live/index.ex @@ -0,0 +1,2034 @@ +defmodule ServiceRadarWebNGWeb.LogLive.Index do + use ServiceRadarWebNGWeb, :live_view + + import Ecto.Query + import ServiceRadarWebNGWeb.UIComponents + + alias Phoenix.LiveView.JS + alias ServiceRadarWebNG.Repo + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + @default_limit 20 + @max_limit 100 + @default_stats_window "last_24h" + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Observability") + |> assign(:active_tab, "logs") + |> assign(:logs, []) + |> assign(:traces, []) + |> assign(:metrics, []) + |> assign(:sparklines, %{}) + |> assign(:summary, %{total: 0, fatal: 0, error: 0, warning: 0, info: 0, debug: 0}) + |> assign(:trace_stats, %{total: 0, error_traces: 0, slow_traces: 0}) + |> assign(:trace_latency, %{ + avg_duration_ms: 0.0, + p95_duration_ms: 0.0, + service_count: 0, + sample_size: 0 + }) + |> assign(:metrics_stats, %{ + total: 0, + slow_spans: 0, + error_spans: 0, + error_rate: 0.0, + avg_duration_ms: 0.0, + p95_duration_ms: 0.0, + sample_size: 0 + }) + |> assign(:limit, @default_limit) + |> SRQLPage.init("logs", default_limit: @default_limit)} + end + + @impl true + def handle_params(params, uri, socket) do + path = uri |> to_string() |> URI.parse() |> Map.get(:path) + + tab = + case Map.get(params, "tab") do + "logs" -> "logs" + "traces" -> "traces" + "metrics" -> "metrics" + _ -> default_tab_for_path(path) + end + + {entity, list_key} = + case tab do + "traces" -> {"otel_trace_summaries", :traces} + "metrics" -> {"otel_metrics", :metrics} + _ -> {"logs", :logs} + end + + socket = + socket + |> assign(:active_tab, tab) + |> assign(:logs, []) + |> assign(:traces, []) + |> assign(:metrics, []) + |> ensure_srql_entity(entity) + |> SRQLPage.load_list(params, uri, list_key, + default_limit: @default_limit, + max_limit: @max_limit + ) + + srql_module = srql_module() + + socket = + case tab do + "traces" -> + trace_latency = compute_trace_latency(socket.assigns.traces) + + socket + |> assign(:trace_stats, load_trace_stats(srql_module)) + |> assign(:trace_latency, trace_latency) + |> assign(:metrics_stats, %{ + total: 0, + slow_spans: 0, + error_spans: 0, + error_rate: 0.0, + avg_duration_ms: 0.0, + p95_duration_ms: 0.0, + sample_size: 0 + }) + + "metrics" -> + metrics_counts = load_metrics_counts(srql_module) + # Query duration stats from continuous aggregation for full 24h data + duration_stats = load_duration_stats_from_cagg() + + metrics_stats = + metrics_counts + |> Map.merge(duration_stats) + |> Map.put( + :error_rate, + compute_error_rate(metrics_counts.total, metrics_counts.error_spans) + ) + + # Load sparkline data for gauge/counter metrics + sparklines = load_sparklines(socket.assigns.metrics) + + socket + |> assign(:metrics_stats, metrics_stats) + |> assign(:sparklines, sparklines) + |> assign(:trace_stats, %{total: 0, error_traces: 0, slow_traces: 0}) + |> assign(:trace_latency, %{ + avg_duration_ms: 0.0, + p95_duration_ms: 0.0, + service_count: 0, + sample_size: 0 + }) + + _ -> + summary = load_summary(srql_module, Map.get(socket.assigns.srql, :query)) + + summary = + case summary do + %{total: 0} when is_list(socket.assigns.logs) and socket.assigns.logs != [] -> + compute_summary(socket.assigns.logs) + + other -> + other 
+ end + + socket + |> assign(:summary, summary) + |> assign(:trace_stats, %{total: 0, error_traces: 0, slow_traces: 0}) + |> assign(:trace_latency, %{ + avg_duration_ms: 0.0, + p95_duration_ms: 0.0, + service_count: 0, + sample_size: 0 + }) + |> assign(:metrics_stats, %{ + total: 0, + slow_spans: 0, + error_spans: 0, + error_rate: 0.0, + avg_duration_ms: 0.0, + p95_duration_ms: 0.0, + sample_size: 0 + }) + end + + {:noreply, socket} + end + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def handle_event("srql_submit", params, socket) do + extra_params = %{"tab" => socket.assigns.active_tab} + + {:noreply, + SRQLPage.handle_event(socket, "srql_submit", params, + fallback_path: "/observability", + extra_params: extra_params + )} + end + + def handle_event("srql_builder_toggle", _params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: current_entity(socket))} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + extra_params = %{"tab" => socket.assigns.active_tab} + + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_run", %{}, + fallback_path: "/observability", + extra_params: extra_params + )} + end + + def handle_event("srql_builder_add_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_add_filter", params, + entity: current_entity(socket) + )} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, + entity: current_entity(socket) + )} + end + + @impl true + def render(assigns) do + pagination = get_in(assigns, [:srql, :pagination]) || %{} + assigns = assign(assigns, :pagination, pagination) + + ~H""" + +
+
+
+
+
Observability
+
+ Unified view of logs, traces, and metrics. +
+
+
+ + <.observability_tabs active={@active_tab} /> + + <.log_summary :if={@active_tab == "logs"} summary={@summary} /> + <.traces_summary + :if={@active_tab == "traces"} + stats={@trace_stats} + latency={@trace_latency} + /> + <.metrics_summary :if={@active_tab == "metrics"} stats={@metrics_stats} /> + + <.ui_panel> + <:header> +
+
{panel_title(@active_tab)}
+
+ {panel_subtitle(@active_tab)} +
+
+ + + <.logs_table :if={@active_tab == "logs"} id="logs" logs={@logs} /> + <.traces_table :if={@active_tab == "traces"} id="traces" traces={@traces} /> + <.metrics_table :if={@active_tab == "metrics"} id="metrics" metrics={@metrics} sparklines={@sparklines} /> + +
+ <.ui_pagination + prev_cursor={Map.get(@pagination, "prev_cursor")} + next_cursor={Map.get(@pagination, "next_cursor")} + base_path={Map.get(@srql, :page_path) || "/observability"} + query={Map.get(@srql, :query, "")} + limit={@limit} + result_count={panel_result_count(@active_tab, @logs, @traces, @metrics)} + extra_params={%{tab: @active_tab}} + /> +
+ +
+
+
+ """ + end + + attr :summary, :map, required: true + + defp log_summary(assigns) do + total = assigns.summary.total + fatal = assigns.summary.fatal + error = assigns.summary.error + warning = assigns.summary.warning + info = assigns.summary.info + debug = assigns.summary.debug + + assigns = + assigns + |> assign(:total, total) + |> assign(:fatal, fatal) + |> assign(:error, error) + |> assign(:warning, warning) + |> assign(:info, info) + |> assign(:debug, debug) + + ~H""" +
+
+
+
Log Level Breakdown
+
+ {format_compact_int(@total)} total (24h) +
+
+
+ <.link patch={~p"/observability?#{%{tab: "logs"}}"} class="btn btn-ghost btn-xs"> + All Logs + + <.link + patch={ + ~p"/observability?#{%{tab: "logs", q: "in:logs severity_text:(fatal,error,FATAL,ERROR) time:last_24h sort:timestamp:desc"}}" + } + class="btn btn-ghost btn-xs text-error" + > + Errors Only + +
+
+
+ <.level_stat label="Fatal" count={@fatal} total={@total} color="error" level="fatal,FATAL" /> + <.level_stat label="Error" count={@error} total={@total} color="warning" level="error,ERROR" /> + <.level_stat + label="Warning" + count={@warning} + total={@total} + color="info" + level="warn,warning,WARN,WARNING" + /> + <.level_stat label="Info" count={@info} total={@total} color="primary" level="info,INFO" /> + <.level_stat + label="Debug" + count={@debug} + total={@total} + color="success" + level="debug,trace,DEBUG,TRACE" + /> +
+
+ """ + end + + attr :label, :string, required: true + attr :count, :integer, required: true + attr :total, :integer, required: true + attr :color, :string, required: true + attr :level, :string, required: true + + defp level_stat(assigns) do + pct = if assigns.total > 0, do: round(assigns.count / assigns.total * 100), else: 0 + query = "in:logs severity_text:(#{assigns.level}) time:last_24h sort:timestamp:desc" + + assigns = + assigns + |> assign(:pct, pct) + |> assign(:query, query) + + ~H""" + <.link + patch={~p"/observability?#{%{tab: "logs", q: @query}}"} + class="rounded-lg bg-base-200/50 p-3 hover:bg-base-200 transition-colors cursor-pointer group" + > +
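Each severity tile links to a pre-built SRQL query; for instance the card rendered above with `level="error,ERROR"` patches to the following query string (derived from the interpolation in `level_stat/1`):

```elixir
"in:logs severity_text:(error,ERROR) time:last_24h sort:timestamp:desc"
```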
+ {@label} + {@pct}% +
+
{@count}
+
+
+
+ + """ + end + + defp color_class("error"), do: "text-error" + defp color_class("warning"), do: "text-warning" + defp color_class("info"), do: "text-info" + defp color_class("primary"), do: "text-primary" + defp color_class("success"), do: "text-success" + defp color_class(_), do: "text-base-content" + + defp color_bg("error"), do: "bg-error" + defp color_bg("warning"), do: "bg-warning" + defp color_bg("info"), do: "bg-info" + defp color_bg("primary"), do: "bg-primary" + defp color_bg("success"), do: "bg-success" + defp color_bg(_), do: "bg-base-content" + + attr :active, :string, required: true + + defp observability_tabs(assigns) do + ~H""" +
+
+ <.tab_button id="logs" label="Logs" icon="hero-rectangle-stack" active={@active} /> + <.tab_button id="traces" label="Traces" icon="hero-clock" active={@active} /> + <.tab_button id="metrics" label="Metrics" icon="hero-chart-bar" active={@active} /> +
+
+ """ + end + + attr :id, :string, required: true + attr :label, :string, required: true + attr :icon, :string, required: true + attr :active, :string, required: true + + defp tab_button(assigns) do + active? = assigns.active == assigns.id + assigns = assign(assigns, :active?, active?) + + ~H""" + <.link + patch={~p"/observability?#{%{tab: @id}}"} + class={[ + "btn btn-sm rounded-lg flex items-center gap-2 transition-colors", + @active? && "btn-primary", + not @active? && "btn-ghost" + ]} + > + <.icon name={@icon} class="size-4" /> + {@label} + + """ + end + + attr :stats, :map, required: true + attr :latency, :map, required: true + + defp traces_summary(assigns) do + total = Map.get(assigns.stats, :total, 0) + error_traces = Map.get(assigns.stats, :error_traces, 0) + slow_traces = Map.get(assigns.stats, :slow_traces, 0) + error_rate = if total > 0, do: Float.round(error_traces / total * 100.0, 1), else: 0.0 + successful = max(total - error_traces, 0) + + avg_duration_ms = Map.get(assigns.latency, :avg_duration_ms, 0.0) + p95_duration_ms = Map.get(assigns.latency, :p95_duration_ms, 0.0) + services_count = Map.get(assigns.latency, :service_count, 0) + sample_size = Map.get(assigns.latency, :sample_size, 0) + + assigns = + assigns + |> assign(:total, total) + |> assign(:successful, successful) + |> assign(:error_traces, error_traces) + |> assign(:slow_traces, slow_traces) + |> assign(:error_rate, error_rate) + |> assign(:avg_duration_ms, avg_duration_ms) + |> assign(:p95_duration_ms, p95_duration_ms) + |> assign(:services_count, services_count) + |> assign(:sample_size, sample_size) + + ~H""" +
+ <.obs_stat title="Total Traces" value={format_compact_int(@total)} icon="hero-clock" /> + <.obs_stat + title="Successful" + value={format_compact_int(@successful)} + icon="hero-check-circle" + tone="success" + /> + <.obs_stat + title="Errors" + value={format_compact_int(@error_traces)} + icon="hero-x-circle" + tone={if @error_traces > 0, do: "error", else: "success"} + /> + <.obs_stat + title="Error Rate" + value={"#{format_pct(@error_rate)}%"} + icon="hero-trending-up" + tone={if @error_rate > 1.0, do: "error", else: "success"} + /> + <.obs_stat + title="Avg Duration" + value={format_duration_ms(@avg_duration_ms)} + subtitle={if @sample_size > 0, do: "sample (#{@sample_size})", else: "sample"} + icon="hero-chart-bar" + tone="info" + /> + <.obs_stat + title="P95 Duration" + value={format_duration_ms(@p95_duration_ms)} + subtitle={if @services_count > 0, do: "#{@services_count} services", else: "sample"} + icon="hero-bolt" + tone="warning" + /> +
+ """ + end + + attr :stats, :map, required: true + + defp metrics_summary(assigns) do + total = Map.get(assigns.stats, :total, 0) + slow_spans = Map.get(assigns.stats, :slow_spans, 0) + error_spans = Map.get(assigns.stats, :error_spans, 0) + error_rate = Map.get(assigns.stats, :error_rate, 0.0) + avg_duration_ms = Map.get(assigns.stats, :avg_duration_ms, 0.0) + p95_duration_ms = Map.get(assigns.stats, :p95_duration_ms, 0.0) + sample_size = Map.get(assigns.stats, :sample_size, 0) + + assigns = + assigns + |> assign(:total, total) + |> assign(:slow_spans, slow_spans) + |> assign(:error_spans, error_spans) + |> assign(:error_rate, error_rate) + |> assign(:avg_duration_ms, avg_duration_ms) + |> assign(:p95_duration_ms, p95_duration_ms) + |> assign(:sample_size, sample_size) + + ~H""" +
+ <.obs_stat title="Total Metrics" value={format_compact_int(@total)} icon="hero-chart-bar" /> + <.obs_stat + title="Slow Spans" + value={format_compact_int(@slow_spans)} + icon="hero-bolt" + tone={if @slow_spans > 0, do: "warning", else: "success"} + /> + <.obs_stat + title="Errors" + value={format_compact_int(@error_spans)} + icon="hero-exclamation-triangle" + tone={if @error_spans > 0, do: "error", else: "success"} + /> + <.obs_stat + title="Error Rate" + value={"#{format_pct(@error_rate)}%"} + icon="hero-trending-up" + tone={if @error_rate > 1.0, do: "error", else: "success"} + /> + <.obs_stat + title="Avg Duration" + value={format_duration_ms(@avg_duration_ms)} + subtitle={if @sample_size > 0, do: "sample (#{@sample_size})", else: "sample"} + icon="hero-clock" + tone="info" + /> + <.obs_stat + title="P95 Duration" + value={format_duration_ms(@p95_duration_ms)} + subtitle="sample" + icon="hero-chart-bar" + tone="neutral" + /> +
+ """ + end + + attr :title, :string, required: true + attr :value, :string, required: true + attr :subtitle, :string, default: nil + attr :icon, :string, required: true + attr :tone, :string, default: "neutral", values: ~w(neutral success warning error info) + + defp obs_stat(assigns) do + {bg, fg} = + case assigns.tone do + "success" -> {"bg-success/10", "text-success"} + "warning" -> {"bg-warning/10", "text-warning"} + "error" -> {"bg-error/10", "text-error"} + "info" -> {"bg-info/10", "text-info"} + _ -> {"bg-base-200/50", "text-base-content/60"} + end + + assigns = assign(assigns, :bg, bg) |> assign(:fg, fg) + + ~H""" +
+
+
+ <.icon name={@icon} class={["size-4", @fg]} /> +
+
+
{@title}
+
{@value}
+
+ {@subtitle} +
+
+
+
+ """ + end + + attr :id, :string, required: true + attr :logs, :list, default: [] + + defp logs_table(assigns) do + ~H""" +
+ + + + + + + + + + + + + + + <%= for {log, idx} <- Enum.with_index(@logs) do %> + + + + + + + <% end %> + +
+ Time + + Level + + Service + + Message +
+ No log entries found. +
+ {format_timestamp(log)} + + <.severity_badge value={Map.get(log, "severity_text")} /> + + {log_service(log)} + + {log_message(log)} +
+
+ """ + end + + attr :id, :string, required: true + attr :traces, :list, default: [] + + defp traces_table(assigns) do + ~H""" +
+ + + + + + + + + + + + + + + + <%= for {trace, idx} <- Enum.with_index(@traces) do %> + + + + + + + + <% end %> + +
+ Time + + Service + + Operation + + Duration + + Errors +
+ No traces found. +
{format_timestamp(trace)} + {Map.get(trace, "root_service_name") || "—"} + + {Map.get(trace, "root_span_name") || "—"} + + {format_duration_ms(Map.get(trace, "duration_ms"))} + + to_int())}> + {Map.get(trace, "error_count", 0) |> to_int()} + +
+
+ """ + end + + attr :id, :string, required: true + attr :metrics, :list, default: [] + attr :sparklines, :map, default: %{} + + defp metrics_table(assigns) do + values = + assigns.metrics + |> Enum.filter(&is_map/1) + |> Enum.map(&metric_value_ms/1) + |> Enum.filter(&is_number/1) + + {min_v, max_v} = + case values do + [] -> {0.0, 0.0} + _ -> {Enum.min(values), Enum.max(values)} + end + + assigns = + assigns + |> assign(:min_v, min_v) + |> assign(:max_v, max_v) + + ~H""" +
+ + + + + + + + + + + + + + + + + + <%= for {metric, idx} <- Enum.with_index(@metrics) do %> + + + + + + + + + + <% end %> + +
+ Time + + Service + + Type + + Operation + + Value + + Trend + + Logs +
+ No metrics found. +
{format_timestamp(metric)} + {Map.get(metric, "service_name") || "—"} + + + + {Map.get(metric, "metric_type") || "—"} + + + + <.link + :if={is_binary(Map.get(metric, "span_id")) and Map.get(metric, "span_id") != ""} + navigate={~p"/observability/metrics/#{Map.get(metric, "span_id")}"} + class="link link-hover" + > + {metric_operation(metric)} + + + {metric_operation(metric)} + + + {format_metric_value(metric)} + + <.metric_viz metric={metric} sparklines={@sparklines} /> + + <.link + :if={is_binary(Map.get(metric, "trace_id")) and Map.get(metric, "trace_id") != ""} + navigate={correlate_metric_href(metric)} + class="btn btn-ghost btn-xs" + title="View correlated logs" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + + + — + +
+
+ """ + end + + attr :metric, :map, required: true + attr :sparklines, :map, default: %{} + + defp metric_viz(assigns) do + metric_type = normalize_string(Map.get(assigns.metric, "metric_type")) || "" + metric_name = Map.get(assigns.metric, "metric_name") + + # Get sparkline data for this metric + sparkline_data = Map.get(assigns.sparklines, metric_name, []) + + assigns = + assigns + |> assign(:metric_type, metric_type) + |> assign(:sparkline_data, sparkline_data) + + ~H""" + <%= case @metric_type do %> + <% "histogram" -> %> + <.histogram_viz metric={@metric} /> + <% type when type in ["gauge", "counter"] -> %> + <%= if length(@sparkline_data) >= 3 do %> + <.sparkline data={@sparkline_data} /> + <% else %> + + <% end %> + <% "span" -> %> + <.span_duration_viz metric={@metric} /> + <% _ -> %> + + <% end %> + """ + end + + attr :data, :list, required: true + + defp sparkline(assigns) do + data = assigns.data + min_val = Enum.min(data) + max_val = Enum.max(data) + range = max_val - min_val + + # Normalize to 0-100 range for SVG, with some padding + points = + data + |> Enum.with_index() + |> Enum.map(fn {val, idx} -> + x = idx / max(length(data) - 1, 1) * 100 + y = if range > 0, do: 100 - (val - min_val) / range * 80 - 10, else: 50 + "#{Float.round(x, 1)},#{Float.round(y, 1)}" + end) + |> Enum.join(" ") + + # Determine trend color based on first vs last value + first_val = List.first(data) || 0 + last_val = List.last(data) || 0 + trend_color = if last_val > first_val * 1.1, do: "stroke-warning", else: "stroke-info" + + assigns = + assigns + |> assign(:points, points) + |> assign(:trend_color, trend_color) + + ~H""" + + + + """ + end + + attr :metric, :map, required: true + + # Duration visualization for span-type metrics + defp span_duration_viz(assigns) do + duration_ms = extract_duration_ms(assigns.metric) + is_slow = Map.get(assigns.metric, "is_slow") == true + + # If no duration, show dash + if is_nil(duration_ms) or duration_ms <= 0 do + ~H""" + + """ + else + # Scale 0-1500ms to 0-100% (threshold at 500ms = 33%) + threshold_ms = 500 + max_display_ms = threshold_ms * 3 + pct = min(duration_ms / max_display_ms * 100, 100) + threshold_pct = threshold_ms / max_display_ms * 100 + + # Color based on duration relative to threshold + bar_color = + cond do + duration_ms <= threshold_ms * 0.5 -> "bg-success" + duration_ms <= threshold_ms -> "bg-success/70" + duration_ms <= threshold_ms * 1.5 -> "bg-warning" + duration_ms <= threshold_ms * 2 -> "bg-warning/80" + true -> "bg-error" + end + + assigns = + assigns + |> assign(:pct, pct) + |> assign(:threshold_pct, threshold_pct) + |> assign(:bar_color, bar_color) + |> assign(:is_slow, is_slow) + |> assign(:duration_ms, duration_ms) + + ~H""" +
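A worked example of the scaling above (editor's sketch): the sparkline normalizes x over 0–100 and y into a padded 10–90 band, while `span_duration_viz` maps durations onto a 0–1500 ms bar with the 500 ms slow threshold at roughly a third of its width.

```elixir
# sparkline: data = [10, 20, 15]  (min 10, max 20, range 10)
#   points => "0.0,90.0 50.0,10.0 100.0,50.0"  # larger values plot nearer the top of the SVG
#   trend  => last (15) > first (10) * 1.1, so the stroke class is "stroke-warning"
# span_duration_viz: duration_ms = 750
#   pct = 50.0, threshold_pct ≈ 33.3, bar_color = "bg-warning"
```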
+
+
+
+
+ SLOW +
+ """ + end + end + + attr :metric, :map, required: true + + defp histogram_viz(assigns) do + # For histograms with duration data, show a duration-based gauge bar + # Most OTEL histograms are duration distributions + duration_ms = extract_duration_value(assigns.metric) + + # Use reasonable bounds for duration visualization (0-1000ms as typical range) + # Anything over 1s will show as full bar + pct = + cond do + not is_number(duration_ms) or duration_ms <= 0 -> 0 + duration_ms >= 1000 -> 100 + true -> duration_ms / 10 # 0-1000ms maps to 0-100% + end + + # Color based on duration + bar_color = + cond do + not is_number(duration_ms) or duration_ms <= 0 -> "bg-base-content/20" + duration_ms >= 500 -> "bg-error" + duration_ms >= 100 -> "bg-warning" + true -> "bg-success" + end + + assigns = + assigns + |> assign(:pct, pct) + |> assign(:bar_color, bar_color) + |> assign(:duration_ms, duration_ms) + + ~H""" +
0, do: "#{Float.round(@duration_ms * 1.0, 1)}ms", else: "no duration"}> +
+
+
+
+ """ + end + + defp extract_histogram_count(metric) do + cond do + is_number(metric["count"]) -> trunc(metric["count"]) + is_binary(metric["count"]) -> trunc(extract_number(metric["count"]) || 0) + is_number(metric["bucket_count"]) -> trunc(metric["bucket_count"]) + true -> 0 + end + end + + defp extract_duration_ms(metric) do + cond do + is_number(metric["duration_ms"]) -> metric["duration_ms"] + is_binary(metric["duration_ms"]) -> extract_number(metric["duration_ms"]) + is_number(metric["duration_seconds"]) -> metric["duration_seconds"] * 1000 + is_binary(metric["duration_seconds"]) -> + case extract_number(metric["duration_seconds"]) do + n when is_number(n) -> n * 1000 + _ -> nil + end + true -> nil + end + end + + defp metric_type_badge_class(metric) do + case metric |> Map.get("metric_type") |> normalize_severity() do + "histogram" -> "badge badge-sm badge-info" + "gauge" -> "badge badge-sm badge-success" + "counter" -> "badge badge-sm badge-primary" + _ -> "badge badge-sm badge-ghost" + end + end + + defp format_metric_value(metric) do + # Get metric name from multiple possible fields + metric_name = get_metric_name(metric) + metric_type = normalize_string(Map.get(metric, "metric_type")) + # NEW: Check for explicit unit field from backend + unit = normalize_string(Map.get(metric, "unit")) + + # PRIORITY 0: Histograms are distributions - show sample count, not a single value + # Trying to show one number for a histogram is misleading + if metric_type == "histogram" do + format_histogram_value(metric) + else + # PRIORITY 0.5: If we have an explicit unit field, use it directly + # This is the most reliable way to format metrics correctly + cond do + unit != nil -> + format_with_explicit_unit(metric, unit) + + is_bytes_metric?(metric_name) -> + format_bytes_value(metric) + + is_count_metric?(metric_name) or is_stats_metric?(metric_name) -> + format_count_value(metric) + + # PRIORITY 2: Only format as duration if: + # - Metric name explicitly suggests duration/latency/time, OR + # - It's a span type (all spans should show duration if available) + is_duration_metric?(metric_name) and has_duration_field?(metric) -> + format_duration_value(metric) + + # Spans should always show duration_ms - that's their primary metric + metric_type == "span" and has_duration_field?(metric) -> + format_duration_value(metric) + + is_actual_timing_span?(metric) and has_duration_field?(metric) -> + format_duration_value(metric) + + # PRIORITY 3: Raw value fallback - just show the number, no units + has_any_value?(metric) -> + format_raw_value(metric, metric_type) + + true -> + "—" + end + end + end + + # Format metric value using explicit unit field from backend + defp format_with_explicit_unit(metric, unit) do + value = extract_primary_value(metric) + + if is_nil(value) do + "—" + else + case unit do + # Duration units + "ms" -> format_ms_value(value) + "s" -> format_seconds_value(value) + "ns" -> format_ns_value(value) + "us" -> format_us_value(value) + # Byte units + "bytes" -> format_bytes_from_value(value) + "By" -> format_bytes_from_value(value) + "kb" -> format_bytes_from_value(value * 1024) + "KiB" -> format_bytes_from_value(value * 1024) + "mb" -> format_bytes_from_value(value * 1024 * 1024) + "MiB" -> format_bytes_from_value(value * 1024 * 1024) + "gb" -> format_bytes_from_value(value * 1024 * 1024 * 1024) + "GiB" -> format_bytes_from_value(value * 1024 * 1024 * 1024) + # Count/dimensionless + "1" -> format_count_from_value(value) + "{request}" -> format_count_from_value(value) + "{connection}" -> 
format_count_from_value(value) + "{thread}" -> format_count_from_value(value) + "{goroutine}" -> format_count_from_value(value) + # Percentage + "%" -> "#{Float.round(value * 1.0, 1)}%" + # Default: show value with unit suffix + _ -> "#{format_compact_value(value)} #{unit}" + end + end + end + + defp extract_primary_value(metric) do + cond do + is_number(metric["value"]) -> metric["value"] + is_binary(metric["value"]) -> extract_number(metric["value"]) + is_number(metric["duration_ms"]) -> metric["duration_ms"] + is_binary(metric["duration_ms"]) -> extract_number(metric["duration_ms"]) + is_number(metric["sum"]) -> metric["sum"] + is_binary(metric["sum"]) -> extract_number(metric["sum"]) + is_number(metric["count"]) -> metric["count"] + is_binary(metric["count"]) -> extract_number(metric["count"]) + true -> nil + end + end + + defp format_ms_value(ms) when is_number(ms) do + cond do + ms >= 60_000 -> "#{Float.round(ms / 60_000, 1)}m" + ms >= 1000 -> "#{Float.round(ms / 1000, 2)}s" + true -> "#{Float.round(ms * 1.0, 1)}ms" + end + end + + defp format_seconds_value(s) when is_number(s) do + ms = s * 1000 + format_ms_value(ms) + end + + defp format_ns_value(ns) when is_number(ns) do + ms = ns / 1_000_000 + format_ms_value(ms) + end + + defp format_us_value(us) when is_number(us) do + ms = us / 1000 + format_ms_value(ms) + end + + defp format_bytes_from_value(bytes) when is_number(bytes) do + cond do + bytes >= 1_099_511_627_776 -> "#{Float.round(bytes / 1_099_511_627_776 * 1.0, 1)} TB" + bytes >= 1_073_741_824 -> "#{Float.round(bytes / 1_073_741_824 * 1.0, 1)} GB" + bytes >= 1_048_576 -> "#{Float.round(bytes / 1_048_576 * 1.0, 1)} MB" + bytes >= 1024 -> "#{Float.round(bytes / 1024 * 1.0, 1)} KB" + true -> "#{trunc(bytes)} B" + end + end + + defp format_count_from_value(count) when is_number(count) do + cond do + count >= 1_000_000 -> "#{Float.round(count / 1_000_000 * 1.0, 1)}M" + count >= 1_000 -> "#{Float.round(count / 1_000 * 1.0, 1)}k" + is_float(count) -> "#{trunc(count)}" + true -> "#{count}" + end + end + + defp format_compact_value(value) when is_number(value) do + cond do + value >= 1_000_000 -> "#{Float.round(value / 1_000_000 * 1.0, 1)}M" + value >= 1_000 -> "#{Float.round(value / 1_000 * 1.0, 1)}k" + is_float(value) -> "#{Float.round(value, 2)}" + true -> "#{value}" + end + end + + # Histograms are distributions - show duration if available, otherwise sample count + defp format_histogram_value(metric) do + # For gRPC/HTTP histograms, duration_ms is the most meaningful value + duration_ms = extract_duration_value(metric) + unit = normalize_string(Map.get(metric, "unit")) + + cond do + # If we have a duration value, show it + is_number(duration_ms) and duration_ms > 0 -> + format_duration_ms(duration_ms) + + # If we have an explicit unit with a value, use that + unit != nil -> + value = extract_primary_value(metric) + if is_number(value) and value > 0 do + format_with_explicit_unit(metric, unit) + else + format_histogram_count_or_dash(metric) + end + + # Fallback to sample count + true -> + format_histogram_count_or_dash(metric) + end + end + + defp format_histogram_count_or_dash(metric) do + count = extract_histogram_count(metric) + + if count > 0 do + "#{format_number(count)} samples" + else + "—" + end + end + + defp extract_duration_value(metric) do + cond do + is_number(metric["duration_ms"]) -> metric["duration_ms"] + is_binary(metric["duration_ms"]) -> extract_number(metric["duration_ms"]) + is_number(metric["duration_seconds"]) -> metric["duration_seconds"] * 1000 + 
is_binary(metric["duration_seconds"]) -> + case extract_number(metric["duration_seconds"]) do + nil -> nil + val -> val * 1000 + end + true -> nil + end + end + + defp format_duration_ms(ms) when is_number(ms) do + cond do + ms >= 60_000 -> "#{Float.round(ms / 60_000, 1)}m" + ms >= 1000 -> "#{Float.round(ms / 1000, 2)}s" + ms >= 1 -> "#{Float.round(ms * 1.0, 1)}ms" + ms > 0 -> "#{Float.round(ms * 1000, 0)}µs" + true -> "0ms" + end + end + + defp format_number(n) when n >= 1_000_000, do: "#{Float.round(n / 1_000_000 * 1.0, 1)}M" + defp format_number(n) when n >= 1_000, do: "#{Float.round(n / 1_000 * 1.0, 1)}k" + defp format_number(n) when is_float(n), do: "#{trunc(n)}" + defp format_number(n), do: "#{n}" + + defp get_metric_name(metric) do + # Check multiple fields where the metric name might be stored + normalize_string(Map.get(metric, "span_name")) || + normalize_string(Map.get(metric, "metric_name")) || + normalize_string(Map.get(metric, "name")) || + "" + end + + defp normalize_string(nil), do: nil + defp normalize_string(""), do: nil + defp normalize_string(s) when is_binary(s), do: String.trim(s) + defp normalize_string(_), do: nil + + defp is_bytes_metric?(nil), do: false + defp is_bytes_metric?(""), do: false + + defp is_bytes_metric?(name) when is_binary(name) do + downcased = String.downcase(name) + + String.contains?(downcased, "bytes") or + String.contains?(downcased, "memory") or + String.contains?(downcased, "heap") or + String.contains?(downcased, "alloc") + end + + defp is_count_metric?(nil), do: false + defp is_count_metric?(""), do: false + + defp is_count_metric?(name) when is_binary(name) do + downcased = String.downcase(name) + + String.ends_with?(downcased, "_count") or + String.ends_with?(downcased, "_total") or + String.contains?(downcased, "goroutines") or + String.contains?(downcased, "threads") + end + + # Stats/counter-like metrics (processed, skipped, etc.) 
+ defp is_stats_metric?(nil), do: false + defp is_stats_metric?(""), do: false + + defp is_stats_metric?(name) when is_binary(name) do + downcased = String.downcase(name) + + String.contains?(downcased, "_stats_") or + String.contains?(downcased, "processed") or + String.contains?(downcased, "skipped") or + String.contains?(downcased, "inferred") or + String.contains?(downcased, "canonical") or + String.contains?(downcased, "requests") or + String.contains?(downcased, "connections") or + String.contains?(downcased, "errors") or + String.contains?(downcased, "failures") + end + + # Check if metric name explicitly suggests it's a duration/timing metric + defp is_duration_metric?(nil), do: false + defp is_duration_metric?(""), do: false + + defp is_duration_metric?(name) when is_binary(name) do + downcased = String.downcase(name) + + String.contains?(downcased, "duration") or + String.contains?(downcased, "latency") or + String.contains?(downcased, "_time") or + String.ends_with?(downcased, "time") or + String.contains?(downcased, "elapsed") or + String.contains?(downcased, "response_ms") or + String.contains?(downcased, "request_ms") + end + + # Check if this is an actual timing span with real HTTP/gRPC context (not empty strings) + defp is_actual_timing_span?(metric) do + has_http = + is_non_empty_string?(metric["http_route"]) or + is_non_empty_string?(metric["http_method"]) + + has_grpc = + is_non_empty_string?(metric["grpc_service"]) or + is_non_empty_string?(metric["grpc_method"]) + + # Also check for span type + is_span = normalize_string(Map.get(metric, "metric_type")) == "span" + + (has_http or has_grpc) and is_span + end + + defp is_non_empty_string?(nil), do: false + defp is_non_empty_string?(""), do: false + defp is_non_empty_string?(s) when is_binary(s), do: String.trim(s) != "" + defp is_non_empty_string?(_), do: false + + defp has_duration_field?(metric) do + is_number(metric["duration_ms"]) or is_binary(metric["duration_ms"]) or + is_number(metric["duration_seconds"]) or is_binary(metric["duration_seconds"]) + end + + defp has_any_value?(metric) do + is_number(metric["value"]) or is_binary(metric["value"]) or + is_number(metric["sum"]) or is_binary(metric["sum"]) or + is_number(metric["count"]) or is_binary(metric["count"]) or + is_number(metric["duration_ms"]) or is_binary(metric["duration_ms"]) + end + + defp format_duration_value(metric) do + ms = + cond do + is_number(metric["duration_ms"]) -> + metric["duration_ms"] * 1.0 + + is_binary(metric["duration_ms"]) -> + extract_number(metric["duration_ms"]) || 0.0 + + is_number(metric["duration_seconds"]) -> + metric["duration_seconds"] * 1000.0 + + is_binary(metric["duration_seconds"]) -> + case extract_number(metric["duration_seconds"]) do + n when is_number(n) -> n * 1000.0 + _ -> 0.0 + end + + true -> + 0.0 + end + + cond do + ms >= 1000 -> "#{Float.round(ms / 1000.0, 2)}s" + true -> "#{Float.round(ms * 1.0, 1)}ms" + end + end + + defp format_bytes_value(metric) do + # Extract value from any available field (OTEL often puts values in unexpected places) + bytes = + cond do + is_number(metric["value"]) -> metric["value"] + is_binary(metric["value"]) -> extract_number(metric["value"]) || 0 + is_number(metric["sum"]) -> metric["sum"] + is_binary(metric["sum"]) -> extract_number(metric["sum"]) || 0 + is_number(metric["duration_ms"]) -> metric["duration_ms"] + is_binary(metric["duration_ms"]) -> extract_number(metric["duration_ms"]) || 0 + true -> 0 + end + + cond do + bytes >= 1_099_511_627_776 -> "#{Float.round(bytes / 
1_099_511_627_776 * 1.0, 1)} TB" + bytes >= 1_073_741_824 -> "#{Float.round(bytes / 1_073_741_824 * 1.0, 1)} GB" + bytes >= 1_048_576 -> "#{Float.round(bytes / 1_048_576 * 1.0, 1)} MB" + bytes >= 1024 -> "#{Float.round(bytes / 1024 * 1.0, 1)} KB" + true -> "#{trunc(bytes)} B" + end + end + + defp format_count_value(metric) do + count = + cond do + is_number(metric["value"]) -> metric["value"] + is_binary(metric["value"]) -> extract_number(metric["value"]) || 0 + is_number(metric["sum"]) -> metric["sum"] + is_binary(metric["sum"]) -> extract_number(metric["sum"]) || 0 + is_number(metric["count"]) -> metric["count"] + is_binary(metric["count"]) -> extract_number(metric["count"]) || 0 + is_number(metric["duration_ms"]) -> metric["duration_ms"] + is_binary(metric["duration_ms"]) -> extract_number(metric["duration_ms"]) || 0 + true -> 0 + end + + cond do + count >= 1_000_000 -> "#{Float.round(count / 1_000_000 * 1.0, 1)}M" + count >= 1_000 -> "#{Float.round(count / 1_000 * 1.0, 1)}k" + is_float(count) -> "#{Float.round(count, 0) |> trunc()}" + true -> "#{trunc(count)}" + end + end + + defp format_raw_value(metric, _metric_type) do + value = + cond do + is_number(metric["value"]) -> metric["value"] + is_binary(metric["value"]) -> extract_number(metric["value"]) + is_number(metric["sum"]) -> metric["sum"] + is_binary(metric["sum"]) -> extract_number(metric["sum"]) + is_number(metric["count"]) -> metric["count"] + is_binary(metric["count"]) -> extract_number(metric["count"]) + is_number(metric["duration_ms"]) -> metric["duration_ms"] + is_binary(metric["duration_ms"]) -> extract_number(metric["duration_ms"]) + true -> nil + end + + if is_number(value) do + cond do + value >= 1_000_000 -> "#{Float.round(value / 1_000_000 * 1.0, 1)}M" + value >= 1_000 -> "#{Float.round(value / 1_000 * 1.0, 1)}k" + is_float(value) -> "#{Float.round(value, 2)}" + true -> "#{trunc(value)}" + end + else + "—" + end + end + + # Used for the visualization bar - extracts numeric value for comparison + defp metric_value_ms(metric) when is_map(metric) do + cond do + is_number(metric["duration_ms"]) -> + metric["duration_ms"] * 1.0 + + is_binary(metric["duration_ms"]) -> + extract_number(metric["duration_ms"]) + + is_number(metric["duration_seconds"]) -> + metric["duration_seconds"] * 1000.0 + + is_binary(metric["duration_seconds"]) -> + case extract_number(metric["duration_seconds"]) do + n when is_number(n) -> n * 1000.0 + _ -> nil + end + + # Fall back to raw value for the bar visualization + is_number(metric["value"]) -> + metric["value"] * 1.0 + + is_binary(metric["value"]) -> + extract_number(metric["value"]) + + is_number(metric["sum"]) -> + metric["sum"] * 1.0 + + is_binary(metric["sum"]) -> + extract_number(metric["sum"]) + + true -> + nil + end + end + + defp metric_value_ms(_), do: nil + + attr :value, :any, default: nil + + defp severity_badge(assigns) do + variant = + case normalize_severity(assigns.value) do + s when s in ["critical", "fatal", "error"] -> "error" + s when s in ["high", "warn", "warning"] -> "warning" + s when s in ["medium", "info"] -> "info" + s when s in ["low", "debug", "trace", "ok"] -> "success" + _ -> "ghost" + end + + label = + case assigns.value do + nil -> "—" + "" -> "—" + v when is_binary(v) -> String.upcase(String.slice(v, 0, 5)) + v -> v |> to_string() |> String.upcase() |> String.slice(0, 5) + end + + assigns = assign(assigns, :variant, variant) |> assign(:label, label) + + ~H""" + <.ui_badge variant={@variant} size="xs">{@label} + """ + end + + defp normalize_severity(nil), do: "" 
+ defp normalize_severity(v) when is_binary(v), do: v |> String.trim() |> String.downcase() + defp normalize_severity(v), do: v |> to_string() |> normalize_severity() + + defp log_id(log) do + Map.get(log, "id") || Map.get(log, "log_id") || "unknown" + end + + defp format_timestamp(log) do + ts = Map.get(log, "timestamp") || Map.get(log, "observed_timestamp") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S") + _ -> ts || "—" + end + end + + defp load_summary(srql_module, current_query) do + base_query = base_query_for_summary(current_query) + + stats_expr = + ~s|count() as total, | <> + ~s|sum(if(severity_text = 'fatal' OR severity_text = 'FATAL', 1, 0)) as fatal, | <> + ~s|sum(if(severity_text = 'error' OR severity_text = 'ERROR', 1, 0)) as error, | <> + ~s|sum(if(severity_text = 'warning' OR severity_text = 'warn' OR severity_text = 'WARNING' OR severity_text = 'WARN', 1, 0)) as warning, | <> + ~s|sum(if(severity_text = 'info' OR severity_text = 'INFO', 1, 0)) as info, | <> + ~s|sum(if(severity_text = 'debug' OR severity_text = 'trace' OR severity_text = 'DEBUG' OR severity_text = 'TRACE', 1, 0)) as debug| + + query = ~s|#{base_query} stats:"#{stats_expr}"| + + case srql_module.query(query) do + {:ok, %{"results" => [%{} = raw | _]}} -> + row = + case Map.get(raw, "payload") do + %{} = payload -> payload + _ -> raw + end + + %{ + total: row |> Map.get("total") |> to_int(), + fatal: row |> Map.get("fatal") |> to_int(), + error: row |> Map.get("error") |> to_int(), + warning: row |> Map.get("warning") |> to_int(), + info: row |> Map.get("info") |> to_int(), + debug: row |> Map.get("debug") |> to_int() + } + + _ -> + %{total: 0, fatal: 0, error: 0, warning: 0, info: 0, debug: 0} + end + end + + defp base_query_for_summary(nil), do: "in:logs time:#{@default_stats_window}" + + defp base_query_for_summary(query) when is_binary(query) do + trimmed = String.trim(query) + + cond do + trimmed == "" -> + "in:logs time:#{@default_stats_window}" + + String.contains?(trimmed, "in:logs") -> + trimmed + |> strip_tokens_for_stats() + |> ensure_time_filter() + + true -> + "in:logs time:#{@default_stats_window}" + end + end + + defp base_query_for_summary(_), do: "in:logs time:#{@default_stats_window}" + + defp strip_tokens_for_stats(query) do + query = Regex.replace(~r/(?:^|\s)limit:\S+/, query, "") + query = Regex.replace(~r/(?:^|\s)sort:\S+/, query, "") + query = Regex.replace(~r/(?:^|\s)cursor:\S+/, query, "") + query = Regex.replace(~r/(?:^|\s)stats:(?:"[^"]*"|\S+)/, query, "") + query |> String.trim() |> String.replace(~r/\s+/, " ") + end + + defp ensure_time_filter(query) do + if Regex.match?(~r/(?:^|\s)time:\S+/, query) do + query + else + "#{query} time:#{@default_stats_window}" + end + end + + defp to_int(value) when is_integer(value), do: value + defp to_int(value) when is_float(value), do: trunc(value) + + defp to_int(value) when is_binary(value) do + case Integer.parse(String.trim(value)) do + {parsed, ""} -> parsed + _ -> 0 + end + end + + defp to_int(_), do: 0 + + defp extract_stats_count({:ok, %{"results" => [%{} = raw | _]}}, key) when is_binary(key) do + row = + case Map.get(raw, "payload") do + %{} = payload -> payload + _ -> raw + end + + row |> Map.get(key) |> to_int() + end + + defp extract_stats_count({:ok, %{"results" => [value | _]}}, _key), do: to_int(value) + defp extract_stats_count(_result, _key), do: 0 + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end + + defp 
parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error + + defp panel_title("traces"), do: "Traces" + defp panel_title("metrics"), do: "Metrics" + defp panel_title(_), do: "Log Stream" + + defp panel_subtitle("traces"), do: "Click a trace to jump to correlated logs." + + defp panel_subtitle("metrics"), + do: "Click a metric to jump to correlated logs (if trace_id is present)." + + defp panel_subtitle(_), do: "Click any log entry to view full details." + + defp panel_result_count("traces", _logs, traces, _metrics), do: length(traces) + defp panel_result_count("metrics", _logs, _traces, metrics), do: length(metrics) + defp panel_result_count(_, logs, _traces, _metrics), do: length(logs) + + defp default_tab_for_path("/observability"), do: "traces" + defp default_tab_for_path(_), do: "logs" + + defp ensure_srql_entity(socket, entity) when is_binary(entity) do + current = socket.assigns |> Map.get(:srql, %{}) |> Map.get(:entity) + + if current == entity do + socket + else + SRQLPage.init(socket, entity, default_limit: @default_limit) + end + end + + defp current_entity(socket) do + socket.assigns |> Map.get(:srql, %{}) |> Map.get(:entity) || "logs" + end + + defp load_trace_stats(srql_module) do + query = + "in:otel_trace_summaries time:last_24h " <> + ~s|stats:"count() as total, sum(if(status_code != 1, 1, 0)) as error_traces, sum(if(duration_ms > 100, 1, 0)) as slow_traces"| + + case srql_module.query(query) do + {:ok, %{"results" => [%{} = raw | _]}} -> + row = + case Map.get(raw, "payload") do + %{} = payload -> payload + _ -> raw + end + + %{ + total: row |> Map.get("total") |> to_int(), + error_traces: row |> Map.get("error_traces") |> to_int(), + slow_traces: row |> Map.get("slow_traces") |> to_int() + } + + _ -> + %{total: 0, error_traces: 0, slow_traces: 0} + end + end + + defp load_metrics_counts(srql_module) do + total_query = ~s|in:otel_metrics time:last_24h stats:"count() as total"| + slow_query = ~s|in:otel_metrics time:last_24h is_slow:true stats:"count() as total"| + + error_level_query = + ~s|in:otel_metrics time:last_24h level:(error,ERROR) stats:"count() as total"| + + error_http4_query = + ~s|in:otel_metrics time:last_24h http_status_code:4% stats:"count() as total"| + + error_http5_query = + ~s|in:otel_metrics time:last_24h http_status_code:5% stats:"count() as total"| + + error_grpc_query = + ~s|in:otel_metrics time:last_24h !grpc_status_code:0 !grpc_status_code:"" stats:"count() as total"| + + total = extract_stats_count(srql_module.query(total_query), "total") + slow_spans = extract_stats_count(srql_module.query(slow_query), "total") + + error_level = extract_stats_count(srql_module.query(error_level_query), "total") + + error_spans = + if error_level > 0 do + error_level + else + error_http4 = extract_stats_count(srql_module.query(error_http4_query), "total") + error_http5 = extract_stats_count(srql_module.query(error_http5_query), "total") + error_grpc = extract_stats_count(srql_module.query(error_grpc_query), "total") + error_http4 + error_http5 + error_grpc + end + + %{total: total, slow_spans: slow_spans, error_spans: error_spans} + end + + # Load duration stats from the continuous 
aggregation for full 24h data + defp load_duration_stats_from_cagg do + cutoff = DateTime.add(DateTime.utc_now(), -24, :hour) + + query = + from(s in "otel_metrics_hourly_stats", + where: s.bucket >= ^cutoff, + select: %{ + total_count: sum(s.total_count), + avg_duration_ms: + fragment( + "CASE WHEN SUM(?) > 0 THEN SUM(? * ?) / SUM(?) ELSE 0 END", + s.total_count, + s.avg_duration_ms, + s.total_count, + s.total_count + ), + p95_duration_ms: max(s.p95_duration_ms) + } + ) + + case Repo.one(query) do + %{total_count: total} = stats when not is_nil(total) and total > 0 -> + avg = case stats.avg_duration_ms do + %Decimal{} = d -> Decimal.to_float(d) + n when is_number(n) -> n * 1.0 + _ -> 0.0 + end + + p95 = case stats.p95_duration_ms do + %Decimal{} = d -> Decimal.to_float(d) + n when is_number(n) -> n * 1.0 + _ -> 0.0 + end + + %{ + avg_duration_ms: avg, + p95_duration_ms: p95, + sample_size: to_int(total) + } + + _ -> + %{avg_duration_ms: 0.0, p95_duration_ms: 0.0, sample_size: 0} + end + rescue + e -> + require Logger + Logger.warning("Failed to load duration stats from cagg: #{inspect(e)}") + %{avg_duration_ms: 0.0, p95_duration_ms: 0.0, sample_size: 0} + end + + # Load sparkline data for gauge/counter metrics + # Returns a map of metric_name -> list of {bucket, avg_value} tuples + defp load_sparklines(metrics) when is_list(metrics) do + # Extract unique metric names for gauges and counters + metric_names = + metrics + |> Enum.filter(fn m -> + type = normalize_string(Map.get(m, "metric_type")) + type in ["gauge", "counter"] + end) + |> Enum.map(fn m -> Map.get(m, "metric_name") end) + |> Enum.filter(&is_binary/1) + |> Enum.uniq() + + if metric_names == [] do + %{} + else + # Query last 2 hours of data, bucketed into 5-minute intervals (24 points) + cutoff = DateTime.add(DateTime.utc_now(), -2, :hour) + + query = + from(m in "otel_metrics", + where: m.metric_name in ^metric_names and m.timestamp >= ^cutoff, + group_by: [m.metric_name, fragment("time_bucket('5 minutes', ?)", m.timestamp)], + order_by: [m.metric_name, fragment("time_bucket('5 minutes', ?)", m.timestamp)], + select: %{ + metric_name: m.metric_name, + bucket: fragment("time_bucket('5 minutes', ?)", m.timestamp), + avg_value: avg(m.value) + } + ) + + query + |> Repo.all() + |> Enum.group_by(& &1.metric_name, fn row -> + # Extract just the numeric value for sparklines + case row.avg_value do + %Decimal{} = d -> Decimal.to_float(d) + n when is_number(n) -> n * 1.0 + _ -> 0.0 + end + end) + end + rescue + e -> + # Log error but don't crash - sparklines are nice-to-have + require Logger + Logger.warning("Failed to load sparklines: #{inspect(e)}") + %{} + end + + defp load_sparklines(_), do: %{} + + defp compute_error_rate(total, errors) when is_integer(total) and total > 0 do + Float.round(errors / total * 100.0, 1) + end + + defp compute_error_rate(_total, _errors), do: 0.0 + + defp compute_trace_latency(rows) do + # For trace summaries, don't filter by is_timing_metric since traces are inherently timing data + duration_stats = compute_trace_duration_stats(rows) + services = unique_services_from_traces(rows) + Map.put(duration_stats, :service_count, map_size(services)) + end + + # Compute duration stats specifically for trace summaries (no HTTP/gRPC filter needed) + defp compute_trace_duration_stats(rows) when is_list(rows) do + durations = + rows + |> Enum.filter(&is_map/1) + |> Enum.map(fn row -> extract_number(Map.get(row, "duration_ms")) end) + |> Enum.filter(&is_number/1) + |> Enum.filter(fn ms -> ms >= 0 and ms < 3_600_000 
end) + + sample_size = length(durations) + + avg = + if sample_size > 0 do + Enum.sum(durations) / sample_size + else + 0.0 + end + + p95 = + if sample_size > 0 do + sorted = Enum.sort(durations) + idx = trunc(Float.floor(sample_size * 0.95)) + Enum.at(sorted, min(idx, sample_size - 1)) || 0.0 + else + 0.0 + end + + %{avg_duration_ms: avg, p95_duration_ms: p95, sample_size: sample_size} + end + + defp compute_trace_duration_stats(_), do: %{avg_duration_ms: 0.0, p95_duration_ms: 0.0, sample_size: 0} + + defp unique_services_from_traces(rows) when is_list(rows) do + rows + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn row, acc -> + name = Map.get(row, "root_service_name") || Map.get(row, "service_name") + + if is_binary(name) and String.trim(name) != "" do + Map.put(acc, name, true) + else + acc + end + end) + end + + defp unique_services_from_traces(_), do: %{} + + defp format_pct(value) when is_float(value), do: :erlang.float_to_binary(value, decimals: 1) + defp format_pct(value) when is_integer(value), do: Integer.to_string(value) + defp format_pct(_), do: "0.0" + + defp format_compact_int(n) when is_integer(n) and n >= 1_000_000 do + :erlang.float_to_binary(n / 1_000_000, decimals: 1) + |> String.trim_trailing("0") + |> String.trim_trailing(".") + |> Kernel.<>("M") + end + + defp format_compact_int(n) when is_integer(n) and n >= 1_000 do + :erlang.float_to_binary(n / 1_000, decimals: 1) + |> String.trim_trailing("0") + |> String.trim_trailing(".") + |> Kernel.<>("k") + end + + defp format_compact_int(n) when is_integer(n), do: Integer.to_string(n) + defp format_compact_int(_), do: "0" + + defp error_count_class(count) when is_integer(count) and count > 0, do: "text-error font-bold" + defp error_count_class(_), do: "text-base-content/60" + + defp metric_operation(metric) do + http_route = Map.get(metric, "http_route") + http_method = Map.get(metric, "http_method") + grpc_service = Map.get(metric, "grpc_service") + grpc_method = Map.get(metric, "grpc_method") + + cond do + is_binary(grpc_service) and grpc_service != "" and is_binary(grpc_method) and + grpc_method != "" -> + "#{grpc_service}/#{grpc_method}" + + is_binary(http_method) and http_method != "" and is_binary(http_route) and http_route != "" -> + "#{http_method} #{http_route}" + + is_binary(http_route) and http_route != "" -> + http_route + + true -> + Map.get(metric, "span_name") || "—" + end + end + + defp correlate_trace_href(trace) do + trace_id = trace |> Map.get("trace_id") |> escape_srql_value() + q = "in:logs trace_id:\"#{trace_id}\" time:last_24h sort:timestamp:desc" + "/observability?" <> URI.encode_query(%{tab: "logs", q: q, limit: 50}) + end + + defp correlate_metric_href(metric) do + trace_id = metric |> Map.get("trace_id") + + if is_binary(trace_id) and trace_id != "" do + q = "in:logs trace_id:\"#{escape_srql_value(trace_id)}\" time:last_24h sort:timestamp:desc" + "/observability?" <> URI.encode_query(%{tab: "logs", q: q, limit: 50}) + else + "/observability?" 
<> URI.encode_query(%{tab: "logs"}) + end + end + + defp escape_srql_value(nil), do: "" + + defp escape_srql_value(value) when is_binary(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("\"", "\\\"") + end + + defp escape_srql_value(value), do: value |> to_string() |> escape_srql_value() + + defp extract_number(value) when is_number(value), do: value + + defp extract_number(value) when is_binary(value) do + case Float.parse(String.trim(value)) do + {n, ""} -> n + _ -> nil + end + end + + defp extract_number(_), do: nil + + defp log_service(log) do + service = + Map.get(log, "service_name") || + Map.get(log, "source") || + Map.get(log, "scope_name") + + case service do + nil -> "—" + "" -> "—" + v when is_binary(v) -> v + v -> to_string(v) + end + end + + defp log_message(log) do + message = + Map.get(log, "body") || + Map.get(log, "message") || + Map.get(log, "short_message") + + case message do + nil -> "—" + "" -> "—" + v when is_binary(v) -> String.slice(v, 0, 300) + v -> v |> to_string() |> String.slice(0, 300) + end + end + + # Compute summary stats from logs + # Must match the same patterns as severity_badge for consistency + defp compute_summary(logs) when is_list(logs) do + initial = %{total: 0, fatal: 0, error: 0, warning: 0, info: 0, debug: 0} + + Enum.reduce(logs, initial, fn log, acc -> + severity = normalize_severity(Map.get(log, "severity_text")) + + updated = + case severity do + s when s in ["fatal", "critical"] -> Map.update!(acc, :fatal, &(&1 + 1)) + s when s in ["error", "err"] -> Map.update!(acc, :error, &(&1 + 1)) + s when s in ["warn", "warning", "high"] -> Map.update!(acc, :warning, &(&1 + 1)) + s when s in ["info", "information", "medium"] -> Map.update!(acc, :info, &(&1 + 1)) + s when s in ["debug", "trace", "low", "ok"] -> Map.update!(acc, :debug, &(&1 + 1)) + _ -> acc + end + + Map.update!(updated, :total, &(&1 + 1)) + end) + end + + defp compute_summary(_), do: %{total: 0, fatal: 0, error: 0, warning: 0, info: 0, debug: 0} +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/log_live/show.ex b/web-ng/lib/serviceradar_web_ng_web/live/log_live/show.ex new file mode 100644 index 000000000..d4d3cfc4f --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/log_live/show.ex @@ -0,0 +1,354 @@ +defmodule ServiceRadarWebNGWeb.LogLive.Show do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Log Details") + |> assign(:log_id, nil) + |> assign(:log, nil) + |> assign(:error, nil) + |> assign(:srql, %{enabled: false})} + end + + @impl true + def handle_params(%{"log_id" => log_id}, _uri, socket) do + # Try log_id first, then fall back to checking if it matches any unique identifier + query = "in:logs log_id:\"#{escape_value(log_id)}\" limit:1" + + {log, error} = + case srql_module().query(query) do + {:ok, %{"results" => [log | _]}} when is_map(log) -> + {log, nil} + + {:ok, %{"results" => []}} -> + # Try alternate query without filter - just return not found + # The logs entity doesn't support id/log_id filtering consistently + {nil, "Log entry not found. 
Note: Log detail view requires log_id field support."} + + {:ok, _other} -> + {nil, "Unexpected response format"} + + {:error, reason} -> + # If log_id filter not supported, show helpful message + error_msg = format_error(reason) + + if String.contains?(error_msg, "unsupported filter") do + {nil, + "Log detail view is not available - the logs entity does not support ID-based filtering."} + else + {nil, "Failed to load log: #{error_msg}"} + end + end + + {:noreply, + socket + |> assign(:log_id, log_id) + |> assign(:log, log) + |> assign(:error, error)} + end + + @impl true + def render(assigns) do + ~H""" + +
+ <.header> + Log Entry + <:subtitle> + {@log_id} + + <:actions> + <.ui_button href={~p"/observability?#{%{tab: "logs"}}"} variant="ghost" size="sm"> + Back to logs + + + + +
+

{@error}

+
+ +
+ <.log_summary log={@log} /> + <.log_body log={@log} /> + <.log_details log={@log} /> +
+
+
+ """ + end + + attr :log, :map, required: true + + defp log_summary(assigns) do + ~H""" +
+
+
+ Level + <.severity_badge value={Map.get(@log, "severity_text")} /> +
+ +
+ Time + {format_timestamp(@log)} +
+ +
+ Service + {Map.get(@log, "service_name")} +
+ +
+ Scope + {Map.get(@log, "scope_name")} +
+ +
+ Trace ID + {Map.get(@log, "trace_id")} +
+ +
+ Span ID + {Map.get(@log, "span_id")} +
+
+
+ """ + end + + attr :log, :map, required: true + + defp log_body(assigns) do + body = Map.get(assigns.log, "body") || Map.get(assigns.log, "message") || "" + + is_json = + String.starts_with?(String.trim(body), "{") or String.starts_with?(String.trim(body), "[") + + formatted_body = + if is_json do + case Jason.decode(body) do + {:ok, decoded} -> Jason.encode!(decoded, pretty: true) + {:error, _} -> body + end + else + body + end + + assigns = + assigns + |> assign(:body, body) + |> assign(:formatted_body, formatted_body) + |> assign(:is_json, is_json) + + ~H""" +
+
+ Message Body +
+ +
+
{@formatted_body}
+
+
+ """ + end + + attr :log, :map, required: true + + defp log_details(assigns) do + # Fields shown in summary or body (exclude from details) + summary_fields = + ~w(id log_id severity_text severity_number timestamp observed_timestamp service_name scope_name trace_id span_id body message) + + # Get remaining fields + detail_fields = + assigns.log + |> Map.keys() + |> Enum.reject(&(&1 in summary_fields)) + |> Enum.sort() + + assigns = assign(assigns, :detail_fields, detail_fields) + + ~H""" +
+
+ Additional Metadata +
+ +
+ <%= for field <- @detail_fields do %> +
+ + {humanize_field(field)} + + + <.format_value value={Map.get(@log, field)} /> + +
+ <% end %> +
+
+ """ + end + + attr :value, :any, default: nil + + defp format_value(%{value: nil} = assigns) do + ~H|| + end + + defp format_value(%{value: ""} = assigns) do + ~H|| + end + + defp format_value(%{value: value} = assigns) when is_boolean(value) do + ~H""" + <.ui_badge variant={if @value, do: "success", else: "error"} size="xs"> + {to_string(@value)} + + """ + end + + defp format_value(%{value: value} = assigns) when is_map(value) or is_list(value) do + formatted = Jason.encode!(value, pretty: true) + assigns = assign(assigns, :formatted, formatted) + + ~H""" +
{@formatted}
+ """ + end + + defp format_value(%{value: value} = assigns) when is_binary(value) do + # Check if it looks like JSON + if String.starts_with?(value, "{") or String.starts_with?(value, "[") do + case Jason.decode(value) do + {:ok, decoded} -> + formatted = Jason.encode!(decoded, pretty: true) + assigns = assign(assigns, :formatted, formatted) + + ~H""" +
{@formatted}
+ """ + + {:error, _} -> + ~H""" + {@value} + """ + end + else + ~H""" + {@value} + """ + end + end + + defp format_value(assigns) do + ~H""" + {to_string(@value)} + """ + end + + attr :value, :any, default: nil + + defp severity_badge(assigns) do + variant = + case normalize_severity(assigns.value) do + s when s in ["critical", "fatal", "error"] -> "error" + s when s in ["high", "warn", "warning"] -> "warning" + s when s in ["medium", "info"] -> "info" + s when s in ["low", "debug", "trace", "ok"] -> "success" + _ -> "ghost" + end + + label = + case assigns.value do + nil -> "—" + "" -> "—" + v when is_binary(v) -> String.upcase(v) + v -> v |> to_string() |> String.upcase() + end + + assigns = assign(assigns, :variant, variant) |> assign(:label, label) + + ~H""" + <.ui_badge variant={@variant} size="sm">{@label} + """ + end + + defp normalize_severity(nil), do: "" + defp normalize_severity(v) when is_binary(v), do: v |> String.trim() |> String.downcase() + defp normalize_severity(v), do: v |> to_string() |> normalize_severity() + + defp format_timestamp(log) do + ts = Map.get(log, "timestamp") || Map.get(log, "observed_timestamp") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC") + _ -> ts || "—" + end + end + + defp parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error + + defp has_value?(map, key) do + case Map.get(map, key) do + nil -> false + "" -> false + _ -> true + end + end + + defp humanize_field(field) when is_binary(field) do + field + |> String.replace("_", " ") + |> String.split() + |> Enum.map(&String.capitalize/1) + |> Enum.join(" ") + end + + defp humanize_field(field), do: to_string(field) + + defp escape_value(value) when is_binary(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("\"", "\\\"") + end + + defp escape_value(other), do: escape_value(to_string(other)) + + defp format_error(%Jason.DecodeError{} = err), do: Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/metric_live/show.ex b/web-ng/lib/serviceradar_web_ng_web/live/metric_live/show.ex new file mode 100644 index 000000000..03ab2974f --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/metric_live/show.ex @@ -0,0 +1,537 @@ +defmodule ServiceRadarWebNGWeb.MetricLive.Show do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + @recent_window "last_1h" + @recent_limit 60 + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Metric") + |> assign(:span_id, nil) + |> assign(:metric, nil) + |> assign(:recent, []) + |> assign(:histogram, nil) + |> assign(:error, nil) + |> assign(:recent_window, @recent_window)} + end + + @impl true + def handle_params(%{"span_id" => span_id}, _uri, socket) do + srql = srql_module() + span_id = span_id |> to_string() |> String.trim() + 
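+    # load_metric/3 below resolves the metric with an SRQL lookup of the shape
+    #   in:otel_metrics span_id:"<span_id>" sort:timestamp:desc limit:1
+    # (the span_id is escaped via escape_srql/1 before interpolation), so trimming here
+    # keeps stray whitespace from the URL parameter out of that query.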
+    socket =
+      socket
+      |> assign(:span_id, span_id)
+      |> load_metric(srql, span_id)
+
+    {:noreply, socket}
+  end
+
+  def handle_params(_params, _uri, socket) do
+    {:noreply, assign(socket, :error, "Missing metric span_id")}
+  end
+
+  @impl true
+  def render(assigns) do
+    ~H"""
+
+ <.header> + Metric + <:subtitle> + {@span_id || "—"} + + <:actions> + <.ui_button href={~p"/observability?#{%{tab: "metrics"}}"} variant="ghost" size="sm"> + Back to Observability + + + + +
+ <.icon name="hero-exclamation-triangle" class="size-5" /> + {@error} +
+ +
+ <.ui_panel class="lg:col-span-2"> + <:header> +
+
Details
+
+ {Map.get(@metric, "service_name") || "—"} · {metric_operation(@metric)} +
+
+
+ <.link + :if={is_binary(Map.get(@metric, "trace_id")) and Map.get(@metric, "trace_id") != ""} + href={correlated_logs_href(@metric)} + class="btn btn-xs btn-outline" + > + Logs + + <.link + :if={is_binary(Map.get(@metric, "trace_id")) and Map.get(@metric, "trace_id") != ""} + href={correlated_trace_href(@metric)} + class="btn btn-xs btn-outline" + > + Trace + +
+ + +
+ <.kv label="Time" value={format_timestamp(@metric)} mono /> + <.kv label="Service" value={Map.get(@metric, "service_name")} /> + <.kv label="Type" value={Map.get(@metric, "metric_type")} /> + <.kv label="Operation" value={metric_operation(@metric)} /> + <.kv label="Value" value={format_metric_value(@metric)} mono /> + <.kv + label="Slow" + value={if Map.get(@metric, "is_slow") == true, do: "true", else: "false"} + mono + /> + <.kv label="HTTP" value={http_summary(@metric)} /> + <.kv label="gRPC" value={grpc_summary(@metric)} /> + <.kv label="Trace ID" value={Map.get(@metric, "trace_id")} mono /> + <.kv label="Span ID" value={Map.get(@metric, "span_id")} mono /> + <.kv label="Component" value={Map.get(@metric, "component")} /> + <.kv label="Level" value={Map.get(@metric, "level")} /> +
+ + + <.ui_panel> + <:header> +
+
Visualization
+
+ Sample from {@recent_window} ({length(@recent)} points) +
+
+ + +
+ No recent samples found for this metric. +
+ +
+
+ Histogram of recent values (sample-based) +
+ <.histogram bins={Map.get(@histogram, :bins, [])} /> +
+ min={format_ms_number(Map.get(@histogram, :min, 0.0))}ms · p50={format_ms_number( + Map.get(@histogram, :p50, 0.0) + )}ms · p95={format_ms_number(Map.get(@histogram, :p95, 0.0))}ms · max={format_ms_number( + Map.get(@histogram, :max, 0.0) + )}ms +
+
+ +
+
+ Recent values (sample-based) +
+ <.sparkline values={Enum.map(@recent, &metric_value_ms/1)} /> +
+ +
+
+
+ """ + end + + attr :label, :string, required: true + attr :value, :any, default: nil + attr :mono, :boolean, default: false + + defp kv(assigns) do + ~H""" +
+
{@label}
+
{format_value(@value)}
+
+ """ + end + + defp format_value(nil), do: "—" + defp format_value(""), do: "—" + defp format_value(v) when is_binary(v), do: v + defp format_value(v), do: to_string(v) + + attr :values, :list, default: [] + + defp sparkline(assigns) do + values = + assigns.values + |> Enum.filter(&is_number/1) + |> Enum.take(@recent_limit) + |> Enum.reverse() + + {min_v, max_v} = + case values do + [] -> {0.0, 0.0} + _ -> {Enum.min(values), Enum.max(values)} + end + + points = + values + |> Enum.with_index() + |> Enum.map(fn {v, idx} -> + x = if length(values) > 1, do: idx / (length(values) - 1) * 200.0, else: 0.0 + y = normalize_y(v, min_v, max_v) + "#{fmt(x)},#{fmt(y)}" + end) + |> Enum.join(" ") + + assigns = + assigns + |> assign(:points, points) + + ~H""" +
+ + + +
+ """ + end + + defp normalize_y(v, min_v, max_v) do + range = if max_v == min_v, do: 1.0, else: max_v - min_v + 55.0 - (v - min_v) / range * 50.0 + end + + defp fmt(num) when is_float(num), do: :erlang.float_to_binary(num, decimals: 1) + defp fmt(num) when is_integer(num), do: Integer.to_string(num) + + defp format_ms_number(value) when is_float(value) do + :erlang.float_to_binary(value, decimals: 1) + |> String.trim_trailing("0") + |> String.trim_trailing(".") + end + + defp format_ms_number(value) when is_integer(value), do: Integer.to_string(value) + defp format_ms_number(_), do: "0" + + attr :bins, :list, default: [] + + defp histogram(assigns) do + max_count = + assigns.bins + |> Enum.map(& &1.count) + |> case do + [] -> 0 + values -> Enum.max(values) + end + + assigns = assign(assigns, :max_count, max_count) + + ~H""" +
+ <%= for bin <- @bins do %> + <% height = + if @max_count > 0 do + max(2, round(bin.count / @max_count * 100)) + else + 0 + end %> +
+
+
+ <% end %> +
+ """ + end + + defp load_metric(socket, srql, span_id) do + query = "in:otel_metrics span_id:\"#{escape_srql(span_id)}\" sort:timestamp:desc limit:1" + + case srql.query(query) do + {:ok, %{"results" => [%{} = metric | _]}} -> + socket + |> assign(:metric, metric) + |> assign(:error, nil) + |> load_recent(srql, metric) + + {:ok, %{"results" => []}} -> + assign(socket, :error, "Metric not found") |> assign(:metric, nil) + + {:error, reason} -> + assign(socket, :error, "SRQL error: #{format_error(reason)}") |> assign(:metric, nil) + + {:ok, other} -> + assign(socket, :error, "Unexpected response: #{inspect(other)}") |> assign(:metric, nil) + end + end + + defp load_recent(socket, srql, %{} = metric) do + service = Map.get(metric, "service_name") + operation = Map.get(metric, "span_name") + metric_type = Map.get(metric, "metric_type") + + query = + [ + "in:otel_metrics", + "time:#{@recent_window}", + (is_binary(service) and service != "") && "service_name:\"#{escape_srql(service)}\"", + (is_binary(operation) and operation != "") && "span_name:\"#{escape_srql(operation)}\"", + (is_binary(metric_type) and metric_type != "") && + "metric_type:\"#{escape_srql(metric_type)}\"", + "sort:timestamp:desc", + "limit:#{@recent_limit}" + ] + |> Enum.filter(&is_binary/1) + |> Enum.join(" ") + + case srql.query(query) do + {:ok, %{"results" => rows}} when is_list(rows) -> + histogram = + case normalize_metric_type(metric_type) do + "histogram" -> build_histogram(rows) + _ -> nil + end + + socket + |> assign(:recent, rows) + |> assign(:histogram, histogram) + + _ -> + assign(socket, :recent, []) |> assign(:histogram, nil) + end + end + + defp build_histogram(rows) when is_list(rows) do + values = rows |> Enum.map(&metric_value_ms/1) |> Enum.filter(&is_number/1) + + if values == [] do + nil + else + min_v = Enum.min(values) + max_v = Enum.max(values) + bins = 10 + range = if max_v == min_v, do: 1.0, else: max_v - min_v + width = range / bins + + counts = + Enum.reduce(values, List.duplicate(0, bins), fn v, acc -> + idx = + if width <= 0 do + 0 + else + trunc((v - min_v) / width) + end + + idx = idx |> max(0) |> min(bins - 1) + List.update_at(acc, idx, &(&1 + 1)) + end) + + sorted = Enum.sort(values) + p50 = Enum.at(sorted, trunc(length(sorted) * 0.50)) || min_v + p95 = Enum.at(sorted, trunc(length(sorted) * 0.95)) || max_v + + %{ + min: Float.round(min_v * 1.0, 1), + p50: Float.round(p50 * 1.0, 1), + p95: Float.round(p95 * 1.0, 1), + max: Float.round(max_v * 1.0, 1), + bins: + counts + |> Enum.with_index() + |> Enum.map(fn {count, idx} -> + %{ + idx: idx, + count: count, + from: Float.round(min_v + idx * width, 1), + to: Float.round(min_v + (idx + 1) * width, 1) + } + end) + } + end + end + + defp build_histogram(_), do: nil + + defp format_metric_value(metric) do + value = metric_value_ms(metric) + if is_number(value), do: "#{Float.round(value * 1.0, 1)}ms", else: "—" + end + + defp metric_value_ms(%{} = metric) do + cond do + is_number(metric["duration_ms"]) -> + metric["duration_ms"] * 1.0 + + is_binary(metric["duration_ms"]) -> + parse_float(metric["duration_ms"]) + + is_number(metric["duration_seconds"]) -> + metric["duration_seconds"] * 1000.0 + + is_binary(metric["duration_seconds"]) -> + case parse_float(metric["duration_seconds"]) do + n when is_number(n) -> n * 1000.0 + _ -> nil + end + + true -> + nil + end + end + + defp metric_value_ms(_), do: nil + + defp parse_float(value) when is_binary(value) do + case Float.parse(String.trim(value)) do + {n, ""} -> n + _ -> nil + end + end + + defp 
parse_float(_), do: nil + + defp metric_operation(metric) do + http_route = Map.get(metric, "http_route") + http_method = Map.get(metric, "http_method") + grpc_service = Map.get(metric, "grpc_service") + grpc_method = Map.get(metric, "grpc_method") + + cond do + is_binary(grpc_service) and grpc_service != "" and is_binary(grpc_method) and + grpc_method != "" -> + "#{grpc_service}/#{grpc_method}" + + is_binary(http_method) and http_method != "" and is_binary(http_route) and http_route != "" -> + "#{http_method} #{http_route}" + + is_binary(http_route) and http_route != "" -> + http_route + + true -> + Map.get(metric, "span_name") || "—" + end + end + + defp http_summary(metric) do + method = Map.get(metric, "http_method") + route = Map.get(metric, "http_route") + status = Map.get(metric, "http_status_code") + + cond do + is_binary(method) and method != "" and is_binary(route) and route != "" -> + "#{method} #{route} (#{status || "—"})" + + is_binary(route) and route != "" -> + "#{route} (#{status || "—"})" + + true -> + "—" + end + end + + defp grpc_summary(metric) do + service = Map.get(metric, "grpc_service") + method = Map.get(metric, "grpc_method") + status = Map.get(metric, "grpc_status_code") + + cond do + is_binary(service) and service != "" and is_binary(method) and method != "" -> + "#{service}/#{method} (#{status || "—"})" + + is_binary(service) and service != "" -> + "#{service} (#{status || "—"})" + + true -> + "—" + end + end + + defp correlated_logs_href(metric) do + trace_id = Map.get(metric, "trace_id") + + q = + "in:logs trace_id:\"#{escape_srql(trace_id)}\" time:last_24h sort:timestamp:desc limit:50" + + "/observability?" <> URI.encode_query(%{tab: "logs", q: q, limit: 50}) + end + + defp correlated_trace_href(metric) do + trace_id = Map.get(metric, "trace_id") + + q = + "in:otel_trace_summaries trace_id:\"#{escape_srql(trace_id)}\" time:last_24h sort:timestamp:desc limit:20" + + "/observability?" 
<> URI.encode_query(%{tab: "traces", q: q, limit: 20}) + end + + defp format_timestamp(row) do + ts = Map.get(row, "timestamp") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S") + _ -> ts || "—" + end + end + + defp parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error + + defp normalize_metric_type(nil), do: "" + + defp normalize_metric_type(value) when is_binary(value), + do: value |> String.trim() |> String.downcase() + + defp normalize_metric_type(value), do: value |> to_string() |> normalize_metric_type() + + defp escape_srql(nil), do: "" + + defp escape_srql(value) when is_binary(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("\"", "\\\"") + end + + defp escape_srql(value), do: value |> to_string() |> escape_srql() + + defp format_error(%Jason.DecodeError{} = err), do: Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/poller_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/poller_live/index.ex new file mode 100644 index 000000000..bf9e9bd9f --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/poller_live/index.ex @@ -0,0 +1,215 @@ +defmodule ServiceRadarWebNGWeb.PollerLive.Index do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + alias Phoenix.LiveView.JS + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + @default_limit 20 + @max_limit 100 + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Pollers") + |> assign(:pollers, []) + |> assign(:limit, @default_limit) + |> SRQLPage.init("pollers", default_limit: @default_limit)} + end + + @impl true + def handle_params(params, uri, socket) do + {:noreply, + socket + |> SRQLPage.load_list(params, uri, :pollers, + default_limit: @default_limit, + max_limit: @max_limit + )} + end + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def handle_event("srql_submit", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_submit", params, fallback_path: "/pollers")} + end + + def handle_event("srql_builder_toggle", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: "pollers")} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_run", %{}, fallback_path: "/pollers")} + end + + def handle_event("srql_builder_add_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, 
"srql_builder_add_filter", params, entity: "pollers")} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, entity: "pollers")} + end + + @impl true + def render(assigns) do + pagination = get_in(assigns, [:srql, :pagination]) || %{} + assigns = assign(assigns, :pagination, pagination) + + ~H""" + +
+ <.ui_panel> + <.pollers_table id="pollers" pollers={@pollers} /> + +
+ <.ui_pagination + prev_cursor={Map.get(@pagination, "prev_cursor")} + next_cursor={Map.get(@pagination, "next_cursor")} + base_path="/pollers" + query={Map.get(@srql, :query, "")} + limit={@limit} + result_count={length(@pollers)} + /> +
+ +
+
+ """ + end + + attr :id, :string, required: true + attr :pollers, :list, default: [] + + defp pollers_table(assigns) do + ~H""" +
+ + + + + + + + + + + + + + + <%= for {poller, idx} <- Enum.with_index(@pollers) do %> + + + + + + + <% end %> + +
+ Poller ID + + Status + + Address + + Last Seen +
+ No pollers found. +
+ {poller_id(poller)} + + <.status_badge active={Map.get(poller, "is_active")} /> + + {poller_address(poller)} + + {format_timestamp(poller)} +
+
+ """ + end + + attr :active, :any, default: nil + + defp status_badge(assigns) do + {label, variant} = + case assigns.active do + true -> {"Active", "success"} + false -> {"Inactive", "error"} + _ -> {"Unknown", "ghost"} + end + + assigns = assign(assigns, :label, label) |> assign(:variant, variant) + + ~H""" + <.ui_badge variant={@variant} size="xs">{@label} + """ + end + + defp poller_id(poller) do + Map.get(poller, "poller_id") || Map.get(poller, "id") || "unknown" + end + + defp poller_address(poller) do + Map.get(poller, "address") || + Map.get(poller, "poller_address") || + Map.get(poller, "host") || + Map.get(poller, "hostname") || + Map.get(poller, "ip") || + Map.get(poller, "ip_address") || + "—" + end + + defp format_timestamp(poller) do + ts = Map.get(poller, "last_seen") || Map.get(poller, "updated_at") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S") + _ -> ts || "—" + end + end + + defp parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/poller_live/show.ex b/web-ng/lib/serviceradar_web_ng_web/live/poller_live/show.ex new file mode 100644 index 000000000..07480cdaa --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/poller_live/show.ex @@ -0,0 +1,290 @@ +defmodule ServiceRadarWebNGWeb.PollerLive.Show do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Poller Details") + |> assign(:poller_id, nil) + |> assign(:poller, nil) + |> assign(:error, nil) + |> assign(:srql, %{enabled: false})} + end + + @impl true + def handle_params(%{"poller_id" => poller_id}, _uri, socket) do + query = "in:pollers poller_id:\"#{escape_value(poller_id)}\" limit:1" + + {poller, error} = + case srql_module().query(query) do + {:ok, %{"results" => [poller | _]}} when is_map(poller) -> + {poller, nil} + + {:ok, %{"results" => []}} -> + {nil, "Poller not found"} + + {:ok, _other} -> + {nil, "Unexpected response format"} + + {:error, reason} -> + {nil, "Failed to load poller: #{format_error(reason)}"} + end + + {:noreply, + socket + |> assign(:poller_id, poller_id) + |> assign(:poller, poller) + |> assign(:error, error)} + end + + @impl true + def render(assigns) do + ~H""" + +
+ <.header> + Poller Details + <:subtitle> + {@poller_id} + + <:actions> + <.ui_button href={~p"/pollers"} variant="ghost" size="sm"> + Back to pollers + + + + +
+

{@error}

+
+ +
+ <.poller_summary poller={@poller} /> + <.poller_details poller={@poller} /> +
+
+
+ """ + end + + attr :poller, :map, required: true + + defp poller_summary(assigns) do + ~H""" +
+
+
+ Status + <.status_badge active={Map.get(@poller, "is_active")} /> +
+ +
+ Poller ID + {Map.get(@poller, "poller_id") || "—"} +
+ +
+ Address + {Map.get(@poller, "address")} +
+ +
+ Last Seen + {format_timestamp(@poller, "last_seen")} +
+ +
+ Created + {format_timestamp(@poller, "created_at")} +
+
+
+ """ + end + + attr :poller, :map, required: true + + defp poller_details(assigns) do + # Fields shown in summary (exclude from details) + summary_fields = ~w(id poller_id is_active address last_seen created_at updated_at) + + # Get remaining fields, excluding empty maps + detail_fields = + assigns.poller + |> Map.keys() + |> Enum.reject(&(&1 in summary_fields)) + |> Enum.reject(fn key -> + value = Map.get(assigns.poller, key) + is_map(value) and map_size(value) == 0 + end) + |> Enum.sort() + + assigns = assign(assigns, :detail_fields, detail_fields) + + ~H""" +
+
+ Additional Details +
+ +
+ <%= for field <- @detail_fields do %> +
+ + {humanize_field(field)} + + + <.format_value value={Map.get(@poller, field)} /> + +
+ <% end %> +
+
+ """ + end + + attr :value, :any, default: nil + + defp format_value(%{value: nil} = assigns) do + ~H|| + end + + defp format_value(%{value: ""} = assigns) do + ~H|| + end + + defp format_value(%{value: value} = assigns) when is_boolean(value) do + ~H""" + <.ui_badge variant={if @value, do: "success", else: "error"} size="xs"> + {to_string(@value)} + + """ + end + + defp format_value(%{value: value} = assigns) when is_map(value) and map_size(value) == 0 do + ~H|| + end + + defp format_value(%{value: value} = assigns) when is_map(value) or is_list(value) do + formatted = Jason.encode!(value, pretty: true) + assigns = assign(assigns, :formatted, formatted) + + ~H""" +
{@formatted}
+ """ + end + + defp format_value(%{value: value} = assigns) when is_binary(value) do + # Check if it looks like JSON + if String.starts_with?(value, "{") or String.starts_with?(value, "[") do + case Jason.decode(value) do + {:ok, decoded} -> + formatted = Jason.encode!(decoded, pretty: true) + assigns = assign(assigns, :formatted, formatted) + + ~H""" +
{@formatted}
+ """ + + {:error, _} -> + ~H""" + {@value} + """ + end + else + ~H""" + {@value} + """ + end + end + + defp format_value(assigns) do + ~H""" + {to_string(@value)} + """ + end + + attr :active, :any, default: nil + + defp status_badge(assigns) do + {label, variant} = + case assigns.active do + true -> {"Active", "success"} + false -> {"Inactive", "error"} + _ -> {"Unknown", "ghost"} + end + + assigns = assign(assigns, :label, label) |> assign(:variant, variant) + + ~H""" + <.ui_badge variant={@variant} size="sm">{@label} + """ + end + + defp format_timestamp(poller, field) do + ts = Map.get(poller, field) + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S UTC") + _ -> ts || "—" + end + end + + defp parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error + + defp has_value?(map, key) do + case Map.get(map, key) do + nil -> false + "" -> false + _ -> true + end + end + + defp humanize_field(field) when is_binary(field) do + field + |> String.replace("_", " ") + |> String.split() + |> Enum.map(&String.capitalize/1) + |> Enum.join(" ") + end + + defp humanize_field(field), do: to_string(field) + + defp escape_value(value) when is_binary(value) do + value + |> String.replace("\\", "\\\\") + |> String.replace("\"", "\\\"") + end + + defp escape_value(other), do: escape_value(to_string(other)) + + defp format_error(%Jason.DecodeError{} = err), do: Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/service_live/index.ex b/web-ng/lib/serviceradar_web_ng_web/live/service_live/index.ex new file mode 100644 index 000000000..8f0d2b967 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/service_live/index.ex @@ -0,0 +1,887 @@ +defmodule ServiceRadarWebNGWeb.ServiceLive.Index do + use ServiceRadarWebNGWeb, :live_view + + import ServiceRadarWebNGWeb.UIComponents + + alias Phoenix.LiveView.JS + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + @default_limit 50 + @max_limit 200 + @summary_window "last_1h" + @summary_limit 2000 + @pollers_default_limit 10 + @pollers_max_limit 50 + + @impl true + def mount(_params, _session, socket) do + {:ok, + socket + |> assign(:page_title, "Services") + |> assign(:services, []) + |> assign(:summary, %{total: 0, available: 0, unavailable: 0, by_type: %{}, check_count: 0}) + |> assign(:limit, @default_limit) + |> assign(:params, %{}) + |> assign(:pollers, []) + |> assign(:pollers_limit, @pollers_default_limit) + |> assign(:pollers_pagination, %{"prev_cursor" => nil, "next_cursor" => nil}) + |> SRQLPage.init("services", default_limit: @default_limit)} + end + + @impl true + def handle_params(params, uri, socket) do + socket = + socket + |> SRQLPage.load_list(params, uri, :services, + default_limit: @default_limit, + max_limit: @max_limit + ) + |> assign(:params, params) + + # Compute summary from a bounded recent window, so the 
"By Service Type" panel reflects more + # than just the current page of results (but remains scale-safe). + summary = load_summary(socket) + + socket = + socket + |> assign(:summary, summary) + |> load_pollers(params) + + {:noreply, socket} + end + + @impl true + def handle_event("srql_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_change", params)} + end + + def handle_event("srql_submit", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_submit", params, fallback_path: "/services")} + end + + def handle_event("srql_builder_toggle", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_toggle", %{}, entity: "services")} + end + + def handle_event("srql_builder_change", params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_change", params)} + end + + def handle_event("srql_builder_apply", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_apply", %{})} + end + + def handle_event("srql_builder_run", _params, socket) do + {:noreply, SRQLPage.handle_event(socket, "srql_builder_run", %{}, fallback_path: "/services")} + end + + def handle_event("srql_builder_add_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_add_filter", params, entity: "services")} + end + + def handle_event("srql_builder_remove_filter", params, socket) do + {:noreply, + SRQLPage.handle_event(socket, "srql_builder_remove_filter", params, entity: "services")} + end + + @impl true + def render(assigns) do + pagination = get_in(assigns, [:srql, :pagination]) || %{} + query = Map.get(assigns.srql, :query, "") + has_filter = is_binary(query) and Regex.match?(~r/(?:^|\s)(?:service_type|type):/, query) + pollers_pagination = Map.get(assigns, :pollers_pagination, %{}) || %{} + + assigns = + assigns + |> assign(:pagination, pagination) + |> assign(:has_filter, has_filter) + |> assign(:pollers_pagination, pollers_pagination) + + ~H""" + +
+
+ <.pollers_panel + params={@params} + pollers={@pollers} + pagination={@pollers_pagination} + limit={@pollers_limit} + /> + <.service_summary summary={@summary} has_filter={@has_filter} /> + + <.ui_panel> + <:header> +
+
Service Checks
+
+ Recent status checks from this page ({length(@services)} results). +
+
+ + + <.services_table id="services" services={@services} /> + +
+ <.ui_pagination + prev_cursor={Map.get(@pagination, "prev_cursor")} + next_cursor={Map.get(@pagination, "next_cursor")} + base_path="/services" + query={Map.get(@srql, :query, "")} + limit={@limit} + result_count={length(@services)} + /> +
+ +
+
+
+ """ + end + + attr :params, :map, default: %{} + attr :pollers, :list, default: [] + attr :pagination, :map, default: %{} + attr :limit, :integer, default: @pollers_default_limit + + defp pollers_panel(assigns) do + pollers = assigns.pollers || [] + prev_cursor = Map.get(assigns.pagination, "prev_cursor") + next_cursor = Map.get(assigns.pagination, "next_cursor") + + assigns = + assigns + |> assign(:pollers, pollers) + |> assign(:prev_cursor, prev_cursor) + |> assign(:next_cursor, next_cursor) + |> assign(:has_prev, is_binary(prev_cursor) and prev_cursor != "") + |> assign(:has_next, is_binary(next_cursor) and next_cursor != "") + |> assign(:showing_text, pollers_pagination_text(length(pollers))) + + ~H""" + <.ui_panel> + <:header> +
+
Pollers
+
+ Pollers report themselves directly and may not appear in service checks.
+
+ <.link + href={~p"/pollers"} + class="text-base-content/60 hover:text-primary" + title="View all pollers" + > + <.icon name="hero-arrow-top-right-on-square" class="size-4" /> + + + +
+ + + + + + + + + + + + + + + <%= for {poller, idx} <- Enum.with_index(@pollers) do %> + + + + + + + <% end %> + +
+ Poller ID + + Status + + Address + + Last Seen +
+ No pollers found. +
+ {poller_id(poller)} + + <.poller_status_badge poller={poller} /> + + {poller_address(poller)} + + {poller_last_seen(poller)} +
+
+ +
+
{@showing_text}
+
+ <.link + :if={@has_prev} + patch={pollers_page_href(@params, @limit, @prev_cursor)} + class="join-item btn btn-sm btn-outline" + > + <.icon name="hero-chevron-left" class="size-4" /> Previous + + + + <.link + :if={@has_next} + patch={pollers_page_href(@params, @limit, @next_cursor)} + class="join-item btn btn-sm btn-outline" + > + Next <.icon name="hero-chevron-right" class="size-4" /> + + +
+
+ + """ + end + + attr :poller, :map, required: true + + defp poller_status_badge(assigns) do + active = Map.get(assigns.poller, "is_active") + + {label, variant} = + case active do + true -> {"Active", "success"} + false -> {"Inactive", "error"} + _ -> {"Unknown", "ghost"} + end + + assigns = assign(assigns, :label, label) |> assign(:variant, variant) + + ~H""" + <.ui_badge variant={@variant} size="xs">{@label} + """ + end + + defp poller_id(%{} = poller) do + Map.get(poller, "poller_id") || Map.get(poller, "id") || "unknown" + end + + defp poller_id(_), do: "unknown" + + defp poller_address(%{} = poller) do + Map.get(poller, "address") || + Map.get(poller, "poller_address") || + Map.get(poller, "host") || + Map.get(poller, "hostname") || + Map.get(poller, "ip") || + Map.get(poller, "ip_address") || + "—" + end + + defp poller_address(_), do: "—" + + defp poller_last_seen(%{} = poller) do + ts = Map.get(poller, "last_seen") || Map.get(poller, "updated_at") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S") + _ -> ts || "—" + end + end + + defp poller_last_seen(_), do: "—" + + defp pollers_pagination_text(count) when is_integer(count) and count > 0 do + "Showing #{count} poller#{if count != 1, do: "s", else: ""}" + end + + defp pollers_pagination_text(_), do: "No pollers" + + defp pollers_page_href(params, limit, cursor) do + base = + params + |> normalize_params() + |> Map.put("pollers_limit", limit) + |> Map.put("pollers_cursor", cursor) + |> Map.reject(fn {_k, v} -> is_nil(v) or v == "" end) + + qs = URI.encode_query(base) + if qs == "", do: "/services", else: "/services?" <> qs + end + + defp normalize_params(%{} = params) do + params + |> Enum.reduce(%{}, fn + {k, v}, acc when is_atom(k) -> Map.put(acc, Atom.to_string(k), v) + {k, v}, acc when is_binary(k) -> Map.put(acc, k, v) + _, acc -> acc + end) + end + + defp normalize_params(_), do: %{} + + defp load_pollers(socket, params) when is_map(params) do + limit = parse_pollers_limit(Map.get(params, "pollers_limit")) + cursor = normalize_optional_string(Map.get(params, "pollers_cursor")) + query = "in:pollers sort:last_seen:desc limit:#{limit}" + + case srql_module().query(query, %{cursor: cursor, limit: limit}) do + {:ok, %{"results" => results} = resp} when is_list(results) -> + pagination = + case Map.get(resp, "pagination") do + %{} = pag -> pag + _ -> %{} + end + + socket + |> assign(:pollers, results) + |> assign(:pollers_limit, limit) + |> assign(:pollers_pagination, pagination) + + _ -> + socket + |> assign(:pollers, []) + |> assign(:pollers_limit, limit) + |> assign(:pollers_pagination, %{"prev_cursor" => nil, "next_cursor" => nil}) + end + end + + defp load_pollers(socket, _), do: socket + + defp parse_pollers_limit(nil), do: @pollers_default_limit + + defp parse_pollers_limit(value) when is_integer(value) and value > 0, + do: min(value, @pollers_max_limit) + + defp parse_pollers_limit(value) when is_binary(value) do + case Integer.parse(String.trim(value)) do + {n, ""} -> parse_pollers_limit(n) + _ -> @pollers_default_limit + end + end + + defp parse_pollers_limit(_), do: @pollers_default_limit + + defp normalize_optional_string(nil), do: nil + defp normalize_optional_string(""), do: nil + defp normalize_optional_string(value) when is_binary(value), do: value + defp normalize_optional_string(_), do: nil + + attr :summary, :map, required: true + attr :has_filter, :boolean, default: false + + defp service_summary(assigns) do + total = assigns.summary.total + available = 
assigns.summary.available + unavailable = assigns.summary.unavailable + by_type = assigns.summary.by_type + check_count = Map.get(assigns.summary, :check_count, 0) + + # Calculate availability percentage + avail_pct = if total > 0, do: round(available / total * 100), else: 0 + + has_filter = Map.get(assigns, :has_filter, false) + + max_type_total = + by_type + |> Map.values() + |> Enum.map(fn counts -> + Map.get(counts, :available, 0) + Map.get(counts, :unavailable, 0) + end) + |> case do + [] -> 0 + values -> Enum.max(values) + end + + assigns = + assigns + |> assign(:total, total) + |> assign(:available, available) + |> assign(:unavailable, unavailable) + |> assign(:avail_pct, avail_pct) + |> assign(:by_type, by_type) + |> assign(:check_count, check_count) + |> assign(:has_filter, has_filter) + |> assign(:max_type_total, max_type_total) + + ~H""" +
+
+
+
+
+ Unique Services +
+
{@total}
+
from {@check_count} checks
+
+
+ + + +
+
+
+ +
+
+
+
Available
+
{@available}
+
{@avail_pct}% healthy
+
+
+ + + +
+
+
+ +
+
+
+
Unavailable
+
0 && "text-error"]}>{@unavailable}
+
{100 - @avail_pct}% failing
+
+
0 && "bg-error/10", + @unavailable == 0 && "bg-base-200/50" + ]}> + 0 && "text-error", + @unavailable == 0 && "text-base-content/40" + ]} + fill="none" + viewBox="0 0 24 24" + stroke="currentColor" + > + + +
+
+
+
+ +
0} + class="rounded-xl border border-base-200 bg-base-100 shadow-sm p-4" + > +
+
+
By Service Type
+ <.link + :if={@has_filter} + patch={~p"/services"} + class="text-xs text-primary hover:underline" + > + (Reset) + +
+
+ <.link + patch={ + ~p"/services?#{%{q: "in:services available:false time:last_1h sort:timestamp:desc"}}" + } + class="btn btn-ghost btn-xs text-error" + > + Failing Only + +
+
+
+ <%= for {type, counts} <- Enum.sort_by(@by_type, fn {_, c} -> -(c.available + c.unavailable) end) |> Enum.take(8) do %> + <.type_bar type={type} counts={counts} max_total={@max_type_total} /> + <% end %> +
+
+ """ + end + + attr :type, :string, required: true + attr :counts, :map, required: true + attr :max_total, :integer, required: true + + defp type_bar(assigns) do + type_total = assigns.counts.available + assigns.counts.unavailable + avail_pct = if type_total > 0, do: round(assigns.counts.available / type_total * 100), else: 0 + fail_pct = if type_total > 0, do: 100 - avail_pct, else: 0 + + volume_pct = + cond do + type_total <= 0 -> 0 + assigns.max_total <= 0 -> 100 + true -> max(6, round(type_total / assigns.max_total * 100)) + end + + # Build SRQL query for this service type + type_query = "in:services service_type:\"#{assigns.type}\" time:last_1h sort:timestamp:desc" + + assigns = + assigns + |> assign(:type_total, type_total) + |> assign(:avail_pct, avail_pct) + |> assign(:fail_pct, fail_pct) + |> assign(:volume_pct, volume_pct) + |> assign(:type_query, type_query) + + ~H""" + <.link + patch={~p"/services?#{%{q: @type_query}}"} + class="flex items-center gap-3 p-1.5 -mx-1.5 rounded-lg hover:bg-base-200/50 transition-colors cursor-pointer group" + title={"Filter by #{@type}"} + > +
+ {@type} +
+
+
+
+
0} + class="absolute inset-y-0 right-0 bg-error/70" + style={"width: #{@fail_pct}%"} + title={"#{@counts.unavailable} unavailable"} + /> +
+
+
+
+ {@type_total} + ({@fail_pct}% fail) +
+ + """ + end + + attr :id, :string, required: true + attr :services, :list, default: [] + + defp services_table(assigns) do + ~H""" +
+ + + + + + + + + + + + + + + + <%= for {svc, idx} <- Enum.with_index(@services) do %> + + + + + + + + <% end %> + +
+ Time + + Status + + Type + + Service + + Message +
+ No services found. +
+ {format_timestamp(svc)} + + <.status_badge available={Map.get(svc, "available")} /> + + {service_type_value(svc) || "—"} + + {service_name_value(svc) || "—"} + + {Map.get(svc, "message") || "—"} +
+
+ """ + end + + attr :available, :any, default: nil + + defp status_badge(assigns) do + available = normalize_available(assigns.available) + + {label, variant} = + case available do + true -> {"OK", "success"} + false -> {"FAIL", "error"} + _ -> {"—", "ghost"} + end + + assigns = assign(assigns, :label, label) |> assign(:variant, variant) + + ~H""" + <.ui_badge variant={@variant} size="xs">{@label} + """ + end + + defp format_timestamp(svc) do + ts = Map.get(svc, "timestamp") + + case parse_timestamp(ts) do + {:ok, dt} -> Calendar.strftime(dt, "%Y-%m-%d %H:%M:%S") + _ -> ts || "—" + end + end + + defp parse_timestamp(nil), do: :error + defp parse_timestamp(""), do: :error + + defp parse_timestamp(value) when is_binary(value) do + value = String.trim(value) + + case DateTime.from_iso8601(value) do + {:ok, dt, _offset} -> + {:ok, dt} + + {:error, _} -> + case NaiveDateTime.from_iso8601(value) do + {:ok, ndt} -> {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + {:error, _} -> :error + end + end + end + + defp parse_timestamp(_), do: :error + + defp load_summary(socket) do + current_query = socket.assigns |> Map.get(:srql, %{}) |> Map.get(:query) + summary_query = summary_query_for(current_query) + + case srql_module().query(summary_query, %{limit: @summary_limit}) do + {:ok, %{"results" => results}} when is_list(results) -> + compute_summary(results) + + _ -> + compute_summary(socket.assigns.services) + end + end + + defp summary_query_for(nil), do: "in:services time:#{@summary_window} sort:timestamp:desc" + + defp summary_query_for(query) when is_binary(query) do + trimmed = String.trim(query) + + cond do + trimmed == "" -> + "in:services time:#{@summary_window} sort:timestamp:desc" + + String.contains?(trimmed, "in:services") -> + trimmed + |> strip_tokens_for_summary() + |> ensure_summary_time_filter() + |> ensure_summary_sort() + + true -> + "in:services time:#{@summary_window} sort:timestamp:desc" + end + end + + defp summary_query_for(_), do: "in:services time:#{@summary_window} sort:timestamp:desc" + + defp strip_tokens_for_summary(query) do + query = Regex.replace(~r/(?:^|\s)limit:\S+/, query, "") + query = Regex.replace(~r/(?:^|\s)sort:\S+/, query, "") + query = Regex.replace(~r/(?:^|\s)cursor:\S+/, query, "") + query |> String.trim() |> String.replace(~r/\s+/, " ") + end + + defp ensure_summary_time_filter(query) do + if Regex.match?(~r/(?:^|\s)time:\S+/, query) do + Regex.replace(~r/(?:^|\s)time:\S+/, query, " time:#{@summary_window}") + |> String.trim() + else + "#{query} time:#{@summary_window}" + end + end + + defp ensure_summary_sort(query) do + if Regex.match?(~r/(?:^|\s)sort:\S+/, query) do + query + else + "#{query} sort:timestamp:desc" + end + end + + # Compute summary stats from unique service instances (deduplicated by poller/agent + service identity) + # This prevents showing N status checks for the same service instance as "N services". + # + # Note: `in:services` is backed by the `service_status` table, which does NOT include `device_id`. + defp compute_summary(services) when is_list(services) do + # Deduplicate by poller_id + agent_id + service_type + service_name, keeping most recent + # (first in sorted list). 
+ unique_services = + services + |> Enum.filter(&is_map/1) + |> Enum.reduce(%{}, fn svc, acc -> + poller_id = Map.get(svc, "poller_id") || "" + agent_id = Map.get(svc, "agent_id") || "" + service_type = service_type_value(svc) || "" + service_name = service_name_value(svc) || "" + + key = "#{poller_id}:#{agent_id}:#{service_type}:#{service_name}" + + # Keep first occurrence (most recent if sorted by timestamp desc) + Map.put_new(acc, key, svc) + end) + |> Map.values() + + # Now compute summary from unique services only + initial = %{ + total: 0, + available: 0, + unavailable: 0, + by_type: %{}, + check_count: length(services) + } + + result = + Enum.reduce(unique_services, initial, fn svc, acc -> + is_available = normalize_available(Map.get(svc, "available")) == true + + service_type = + svc + |> service_type_value() + |> case do + nil -> "unknown" + "" -> "unknown" + v -> v |> to_string() |> String.trim() |> String.downcase() + end + + by_type = + Map.update(acc.by_type, service_type, %{available: 0, unavailable: 0}, fn counts -> + if is_available do + Map.update!(counts, :available, &(&1 + 1)) + else + Map.update!(counts, :unavailable, &(&1 + 1)) + end + end) + + %{ + acc + | total: acc.total + 1, + available: acc.available + if(is_available, do: 1, else: 0), + unavailable: acc.unavailable + if(is_available, do: 0, else: 1), + by_type: by_type + } + end) + + result + end + + defp compute_summary(_), + do: %{total: 0, available: 0, unavailable: 0, by_type: %{}, check_count: 0} + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end + + defp normalize_available(true), do: true + defp normalize_available(false), do: false + defp normalize_available(1), do: true + defp normalize_available(0), do: false + + defp normalize_available(value) when is_binary(value) do + case String.trim(String.downcase(value)) do + "true" -> true + "t" -> true + "1" -> true + "false" -> false + "f" -> false + "0" -> false + _ -> nil + end + end + + defp normalize_available(_), do: nil + + defp service_type_value(%{} = svc) do + Map.get(svc, "service_type") || + Map.get(svc, "type") || + Map.get(svc, "check_type") || + Map.get(svc, "service_kind") + end + + defp service_type_value(_), do: nil + + defp service_name_value(%{} = svc) do + Map.get(svc, "service_name") || + Map.get(svc, "name") || + Map.get(svc, "service") || + Map.get(svc, "check_name") + end + + defp service_name_value(_), do: nil +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/user_live/confirmation.ex b/web-ng/lib/serviceradar_web_ng_web/live/user_live/confirmation.ex new file mode 100644 index 000000000..eaf91758f --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/user_live/confirmation.ex @@ -0,0 +1,94 @@ +defmodule ServiceRadarWebNGWeb.UserLive.Confirmation do + use ServiceRadarWebNGWeb, :live_view + + alias ServiceRadarWebNG.Accounts + + @impl true + def render(assigns) do + ~H""" + +
+
+ <.header>Welcome {@user.email} +
+ + <.form + :if={!@user.confirmed_at} + for={@form} + id="confirmation_form" + phx-mounted={JS.focus_first()} + phx-submit="submit" + action={~p"/users/log-in?_action=confirmed"} + phx-trigger-action={@trigger_submit} + > + + <.button + name={@form[:remember_me].name} + value="true" + phx-disable-with="Confirming..." + class="btn btn-primary w-full" + > + Confirm and stay logged in + + <.button phx-disable-with="Confirming..." class="btn btn-primary btn-soft w-full mt-2"> + Confirm and log in only this time + + + + <.form + :if={@user.confirmed_at} + for={@form} + id="login_form" + phx-submit="submit" + phx-mounted={JS.focus_first()} + action={~p"/users/log-in"} + phx-trigger-action={@trigger_submit} + > + + <%= if @current_scope do %> + <.button phx-disable-with="Logging in..." class="btn btn-primary w-full"> + Log in + + <% else %> + <.button + name={@form[:remember_me].name} + value="true" + phx-disable-with="Logging in..." + class="btn btn-primary w-full" + > + Keep me logged in on this device + + <.button phx-disable-with="Logging in..." class="btn btn-primary btn-soft w-full mt-2"> + Log me in only this time + + <% end %> + + +

+ Tip: If you prefer passwords, you can enable them in the user settings. +

+
+
+ """ + end + + @impl true + def mount(%{"token" => token}, _session, socket) do + if user = Accounts.get_user_by_magic_link_token(token) do + form = to_form(%{"token" => token}, as: "user") + + {:ok, assign(socket, user: user, form: form, trigger_submit: false), + temporary_assigns: [form: nil]} + else + {:ok, + socket + |> put_flash(:error, "Magic link is invalid or it has expired.") + |> push_navigate(to: ~p"/users/log-in")} + end + end + + @impl true + def handle_event("submit", %{"user" => params}, socket) do + {:noreply, assign(socket, form: to_form(params, as: "user"), trigger_submit: true)} + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/user_live/login.ex b/web-ng/lib/serviceradar_web_ng_web/live/user_live/login.ex new file mode 100644 index 000000000..ed78490f5 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/user_live/login.ex @@ -0,0 +1,132 @@ +defmodule ServiceRadarWebNGWeb.UserLive.Login do + use ServiceRadarWebNGWeb, :live_view + + alias ServiceRadarWebNG.Accounts + + @impl true + def render(assigns) do + ~H""" + +
+
+ <.header> +

Log in

+ <:subtitle> + <%= if @current_scope do %> + You need to reauthenticate to perform sensitive actions on your account. + <% else %> + Don't have an account? <.link + navigate={~p"/users/register"} + class="font-semibold text-brand hover:underline" + phx-no-format + >Sign up for an account now. + <% end %> + + +
+ +
+ <.icon name="hero-information-circle" class="size-6 shrink-0" /> +
+

You are running the local mail adapter.

+

+ To see sent emails, visit <.link href="/dev/mailbox" class="underline">the mailbox page. +

+
+
+ + <.form + :let={f} + for={@form} + id="login_form_magic" + action={~p"/users/log-in"} + phx-submit="submit_magic" + > + <.input + readonly={!!@current_scope} + field={f[:email]} + type="email" + label="Email" + autocomplete="email" + required + phx-mounted={JS.focus()} + /> + <.button class="btn btn-primary w-full"> + Log in with email + + + +
or
+ + <.form + :let={f} + for={@form} + id="login_form_password" + action={~p"/users/log-in"} + phx-submit="submit_password" + phx-trigger-action={@trigger_submit} + > + <.input + readonly={!!@current_scope} + field={f[:email]} + type="email" + label="Email" + autocomplete="email" + required + /> + <.input + field={@form[:password]} + type="password" + label="Password" + autocomplete="current-password" + /> + <.button class="btn btn-primary w-full" name={@form[:remember_me].name} value="true"> + Log in and stay logged in + + <.button class="btn btn-primary btn-soft w-full mt-2"> + Log in only this time + + +
+
+ """ + end + + @impl true + def mount(_params, _session, socket) do + email = + Phoenix.Flash.get(socket.assigns.flash, :email) || + get_in(socket.assigns, [:current_scope, Access.key(:user), Access.key(:email)]) + + form = to_form(%{"email" => email}, as: "user") + + {:ok, assign(socket, form: form, trigger_submit: false)} + end + + @impl true + def handle_event("submit_password", _params, socket) do + {:noreply, assign(socket, :trigger_submit, true)} + end + + def handle_event("submit_magic", %{"user" => %{"email" => email}}, socket) do + if user = Accounts.get_user_by_email(email) do + Accounts.deliver_login_instructions( + user, + &url(~p"/users/log-in/#{&1}") + ) + end + + info = + "If your email is in our system, you will receive instructions for logging in shortly." + + {:noreply, + socket + |> put_flash(:info, info) + |> push_navigate(to: ~p"/users/log-in")} + end + + defp local_mail_adapter? do + Application.get_env(:serviceradar_web_ng, ServiceRadarWebNG.Mailer)[:adapter] == + Swoosh.Adapters.Local + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/user_live/registration.ex b/web-ng/lib/serviceradar_web_ng_web/live/user_live/registration.ex new file mode 100644 index 000000000..8930fc384 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/user_live/registration.ex @@ -0,0 +1,88 @@ +defmodule ServiceRadarWebNGWeb.UserLive.Registration do + use ServiceRadarWebNGWeb, :live_view + + alias ServiceRadarWebNG.Accounts + alias ServiceRadarWebNG.Accounts.User + + @impl true + def render(assigns) do + ~H""" + +
+
+ <.header> + Register for an account + <:subtitle> + Already registered? + <.link navigate={~p"/users/log-in"} class="font-semibold text-brand hover:underline"> + Log in + + to your account now. + + +
+ + <.form for={@form} id="registration_form" phx-submit="save" phx-change="validate"> + <.input + field={@form[:email]} + type="email" + label="Email" + autocomplete="username" + required + phx-mounted={JS.focus()} + /> + + <.button phx-disable-with="Creating account..." class="btn btn-primary w-full"> + Create an account + + +
+
+ """ + end + + @impl true + def mount(_params, _session, %{assigns: %{current_scope: %{user: user}}} = socket) + when not is_nil(user) do + {:ok, redirect(socket, to: ServiceRadarWebNGWeb.UserAuth.signed_in_path(socket))} + end + + def mount(_params, _session, socket) do + changeset = Accounts.change_user_email(%User{}, %{}, validate_unique: false) + + {:ok, assign_form(socket, changeset), temporary_assigns: [form: nil]} + end + + @impl true + def handle_event("save", %{"user" => user_params}, socket) do + case Accounts.register_user(user_params) do + {:ok, user} -> + {:ok, _} = + Accounts.deliver_login_instructions( + user, + &url(~p"/users/log-in/#{&1}") + ) + + {:noreply, + socket + |> put_flash( + :info, + "An email was sent to #{user.email}, please access it to confirm your account." + ) + |> push_navigate(to: ~p"/users/log-in")} + + {:error, %Ecto.Changeset{} = changeset} -> + {:noreply, assign_form(socket, changeset)} + end + end + + def handle_event("validate", %{"user" => user_params}, socket) do + changeset = Accounts.change_user_email(%User{}, user_params, validate_unique: false) + {:noreply, assign_form(socket, Map.put(changeset, :action, :validate))} + end + + defp assign_form(socket, %Ecto.Changeset{} = changeset) do + form = to_form(changeset, as: "user") + assign(socket, form: form) + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/live/user_live/settings.ex b/web-ng/lib/serviceradar_web_ng_web/live/user_live/settings.ex new file mode 100644 index 000000000..4e50586ad --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/live/user_live/settings.ex @@ -0,0 +1,157 @@ +defmodule ServiceRadarWebNGWeb.UserLive.Settings do + use ServiceRadarWebNGWeb, :live_view + + on_mount {ServiceRadarWebNGWeb.UserAuth, :require_sudo_mode} + + alias ServiceRadarWebNG.Accounts + + @impl true + def render(assigns) do + ~H""" + +
+ <.header> + Account Settings + <:subtitle>Manage your account email address and password settings + +
+ + <.form for={@email_form} id="email_form" phx-submit="update_email" phx-change="validate_email"> + <.input + field={@email_form[:email]} + type="email" + label="Email" + autocomplete="username" + required + /> + <.button variant="primary" phx-disable-with="Changing...">Change Email + + +
+ + <.form + for={@password_form} + id="password_form" + action={~p"/users/update-password"} + method="post" + phx-change="validate_password" + phx-submit="update_password" + phx-trigger-action={@trigger_submit} + > + + <.input + field={@password_form[:password]} + type="password" + label="New password" + autocomplete="new-password" + required + /> + <.input + field={@password_form[:password_confirmation]} + type="password" + label="Confirm new password" + autocomplete="new-password" + /> + <.button variant="primary" phx-disable-with="Saving..."> + Save Password + + + + """ + end + + @impl true + def mount(%{"token" => token}, _session, socket) do + socket = + case Accounts.update_user_email(socket.assigns.current_scope.user, token) do + {:ok, _user} -> + put_flash(socket, :info, "Email changed successfully.") + + {:error, _} -> + put_flash(socket, :error, "Email change link is invalid or it has expired.") + end + + {:ok, push_navigate(socket, to: ~p"/users/settings")} + end + + def mount(_params, _session, socket) do + user = socket.assigns.current_scope.user + email_changeset = Accounts.change_user_email(user, %{}, validate_unique: false) + password_changeset = Accounts.change_user_password(user, %{}, hash_password: false) + + socket = + socket + |> assign(:current_email, user.email) + |> assign(:email_form, to_form(email_changeset)) + |> assign(:password_form, to_form(password_changeset)) + |> assign(:trigger_submit, false) + + {:ok, socket} + end + + @impl true + def handle_event("validate_email", params, socket) do + %{"user" => user_params} = params + + email_form = + socket.assigns.current_scope.user + |> Accounts.change_user_email(user_params, validate_unique: false) + |> Map.put(:action, :validate) + |> to_form() + + {:noreply, assign(socket, email_form: email_form)} + end + + def handle_event("update_email", params, socket) do + %{"user" => user_params} = params + user = socket.assigns.current_scope.user + true = Accounts.sudo_mode?(user) + + case Accounts.change_user_email(user, user_params) do + %{valid?: true} = changeset -> + Accounts.deliver_user_update_email_instructions( + Ecto.Changeset.apply_action!(changeset, :insert), + user.email, + &url(~p"/users/settings/confirm-email/#{&1}") + ) + + info = "A link to confirm your email change has been sent to the new address." 
+ {:noreply, socket |> put_flash(:info, info)} + + changeset -> + {:noreply, assign(socket, :email_form, to_form(changeset, action: :insert))} + end + end + + def handle_event("validate_password", params, socket) do + %{"user" => user_params} = params + + password_form = + socket.assigns.current_scope.user + |> Accounts.change_user_password(user_params, hash_password: false) + |> Map.put(:action, :validate) + |> to_form() + + {:noreply, assign(socket, password_form: password_form)} + end + + def handle_event("update_password", params, socket) do + %{"user" => user_params} = params + user = socket.assigns.current_scope.user + true = Accounts.sudo_mode?(user) + + case Accounts.change_user_password(user, user_params) do + %{valid?: true} = changeset -> + {:noreply, assign(socket, trigger_submit: true, password_form: to_form(changeset))} + + changeset -> + {:noreply, assign(socket, password_form: to_form(changeset, action: :insert))} + end + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/plugs/safe_parsers.ex b/web-ng/lib/serviceradar_web_ng_web/plugs/safe_parsers.ex new file mode 100644 index 000000000..a1b8e16fd --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/plugs/safe_parsers.ex @@ -0,0 +1,34 @@ +defmodule ServiceRadarWebNGWeb.Plugs.SafeParsers do + @moduledoc false + + @behaviour Plug + + @impl true + def init(opts), do: Plug.Parsers.init(opts) + + @impl true + def call(conn, opts) do + try do + Plug.Parsers.call(conn, opts) + rescue + _err in [Plug.Parsers.ParseError] -> + send_malformed_request(conn) + end + end + + defp send_malformed_request(conn) do + json = Phoenix.json_library() + + body = + try do + json.encode!(%{error: "malformed_request"}) + rescue + _ -> "{\"error\":\"malformed_request\"}" + end + + conn + |> Plug.Conn.put_resp_content_type("application/json") + |> Plug.Conn.send_resp(400, body) + |> Plug.Conn.halt() + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/router.ex b/web-ng/lib/serviceradar_web_ng_web/router.ex new file mode 100644 index 000000000..80e369ea9 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/router.ex @@ -0,0 +1,103 @@ +defmodule ServiceRadarWebNGWeb.Router do + use ServiceRadarWebNGWeb, :router + + import ServiceRadarWebNGWeb.UserAuth + + pipeline :browser do + plug :accepts, ["html"] + plug :fetch_session + plug :fetch_live_flash + plug :put_root_layout, html: {ServiceRadarWebNGWeb.Layouts, :root} + plug :protect_from_forgery + plug :put_secure_browser_headers + plug :fetch_current_scope_for_user + end + + pipeline :api do + plug :accepts, ["json"] + end + + pipeline :api_auth do + plug :accepts, ["json"] + plug :fetch_session + plug :protect_from_forgery + plug :fetch_current_scope_for_user + plug :require_authenticated_user + end + + scope "/", ServiceRadarWebNGWeb do + pipe_through :browser + + get "/", PageController, :home + end + + # Other scopes may use custom stacks. + scope "/api", ServiceRadarWebNG.Api do + pipe_through :api_auth + + post "/query", QueryController, :execute + get "/devices", DeviceController, :index + get "/devices/:device_id", DeviceController, :show + end + + # Enable LiveDashboard and Swoosh mailbox preview in development + if Application.compile_env(:serviceradar_web_ng, :dev_routes) do + # If you want to use the LiveDashboard in production, you should put + # it behind authentication and allow only admins to access it. 
+ # If your application does not have an admins-only section yet, + # you can use Plug.BasicAuth to set up some basic authentication + # as long as you are also using SSL (which you should anyway). + import Phoenix.LiveDashboard.Router + + scope "/dev" do + pipe_through :browser + + live_dashboard "/dashboard", metrics: ServiceRadarWebNGWeb.Telemetry + forward "/mailbox", Plug.Swoosh.MailboxPreview + end + end + + ## Authentication routes + + scope "/", ServiceRadarWebNGWeb do + pipe_through [:browser, :require_authenticated_user] + + # Redirect /dashboard to /analytics + get "/dashboard", PageController, :redirect_to_analytics + + live_session :require_authenticated_user, + on_mount: [{ServiceRadarWebNGWeb.UserAuth, :require_authenticated}] do + live "/analytics", AnalyticsLive.Index, :index + live "/devices", DeviceLive.Index, :index + live "/devices/:device_id", DeviceLive.Show, :show + live "/pollers", PollerLive.Index, :index + live "/pollers/:poller_id", PollerLive.Show, :show + live "/events", EventLive.Index, :index + live "/events/:event_id", EventLive.Show, :show + live "/observability", LogLive.Index, :index + live "/observability/metrics/:span_id", MetricLive.Show, :show + live "/logs", LogLive.Index, :index + live "/logs/:log_id", LogLive.Show, :show + live "/services", ServiceLive.Index, :index + live "/interfaces", InterfaceLive.Index, :index + live "/users/settings", UserLive.Settings, :edit + live "/users/settings/confirm-email/:token", UserLive.Settings, :confirm_email + end + + post "/users/update-password", UserSessionController, :update_password + end + + scope "/", ServiceRadarWebNGWeb do + pipe_through [:browser] + + live_session :current_user, + on_mount: [{ServiceRadarWebNGWeb.UserAuth, :mount_current_scope}] do + live "/users/register", UserLive.Registration, :new + live "/users/log-in", UserLive.Login, :new + live "/users/log-in/:token", UserLive.Confirmation, :new + end + + post "/users/log-in", UserSessionController, :create + delete "/users/log-out", UserSessionController, :delete + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/srql/builder.ex b/web-ng/lib/serviceradar_web_ng_web/srql/builder.ex new file mode 100644 index 000000000..92cf21c7c --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/srql/builder.ex @@ -0,0 +1,596 @@ +defmodule ServiceRadarWebNGWeb.SRQL.Builder do + @moduledoc false + + alias ServiceRadarWebNGWeb.SRQL.Catalog + + @max_limit 500 + @allowed_filter_ops ["contains", "not_contains", "equals", "not_equals"] + @allowed_downsample_aggs ["avg", "min", "max", "sum", "count"] + + @type state :: map() + + def default_state(entity, limit \\ 100) when is_binary(entity) do + config = Catalog.entity(entity) + + %{ + "entity" => config.id, + "time" => config.default_time || "", + "bucket" => config[:default_bucket] || "", + "agg" => config[:default_agg] || "avg", + "series" => config[:default_series_field] || "", + "sort_field" => config.default_sort_field, + "sort_dir" => config.default_sort_dir, + "limit" => normalize_limit(limit), + "filters" => [ + %{ + "field" => config.default_filter_field, + "op" => "contains", + "value" => "" + } + ] + } + end + + def build(%{} = state) do + entity = Map.get(state, "entity", "devices") + time = Map.get(state, "time", "") + bucket = Map.get(state, "bucket", "") + agg = Map.get(state, "agg", "avg") + series = Map.get(state, "series", "") + sort_field = Map.get(state, "sort_field", default_sort_field(entity)) + sort_dir = Map.get(state, "sort_dir", "desc") + limit = normalize_limit(Map.get(state, 
"limit", 100)) + filters = normalize_filters(entity, Map.get(state, "filters", [])) + + tokens = + ["in:#{entity}"] + |> maybe_add_time(time) + |> maybe_add_downsample(entity, time, bucket, agg, series) + |> maybe_add_filters(filters) + |> maybe_add_sort(sort_field, sort_dir) + |> Kernel.++(["limit:#{limit}"]) + + Enum.join(tokens, " ") + end + + def update(%{} = state, %{} = params) do + state + |> Map.merge(stringify_map(params)) + |> normalize_state() + end + + def parse(query) when is_binary(query) do + tokens = + query + |> String.trim() + |> String.split(~r/\s+/, trim: true) + + with {:ok, parts} <- parse_tokens(tokens), + :ok <- reject_unknown_tokens(tokens, parts), + :ok <- validate_filter_fields(parts.entity, parts.filters), + :ok <- validate_downsample(parts.entity, parts.bucket, parts.agg, parts.series) do + {:ok, + %{ + "entity" => parts.entity, + "time" => parts.time, + "bucket" => parts.bucket, + "agg" => parts.agg, + "series" => parts.series, + "sort_field" => parts.sort_field, + "sort_dir" => parts.sort_dir, + "limit" => parts.limit, + "filters" => parts.filters + } + |> normalize_state()} + end + end + + def parse(_), do: {:error, :invalid_query} + + defp normalize_state(%{} = state) do + entity = + state + |> Map.get("entity", "devices") + |> safe_to_string() + |> String.trim() + |> case do + "" -> "devices" + value -> value + end + + config = Catalog.entity(entity) + + sort_dir = + case Map.get(state, "sort_dir") do + "asc" -> "asc" + _ -> "desc" + end + + filters = normalize_filters(entity, Map.get(state, "filters", [])) + + bucket = normalize_bucket(config, Map.get(state, "bucket")) + agg = normalize_agg(config, Map.get(state, "agg")) + series = normalize_series_field(config, Map.get(state, "series")) + + time = + state + |> Map.get("time", "") + |> normalize_time() + |> ensure_downsample_time(config, bucket) + + %{ + "entity" => config.id, + "time" => time, + "bucket" => bucket, + "agg" => agg, + "series" => series, + "sort_field" => normalize_sort_field(entity, Map.get(state, "sort_field")), + "sort_dir" => sort_dir, + "limit" => normalize_limit(Map.get(state, "limit", 100)), + "filters" => filters + } + end + + defp normalize_time(nil), do: "" + + defp normalize_time(time) when time in ["", "last_1h", "last_24h", "last_7d", "last_30d"] do + time + end + + defp normalize_time(_), do: "" + + defp normalize_bucket(%{downsample: true} = config, value) do + candidate = + value + |> safe_to_string() + |> String.trim() + + default = safe_to_string(Map.get(config, :default_bucket) || "") + + cond do + candidate == "" -> default + Regex.match?(~r/^\d+(?:s|m|h|d)$/, candidate) -> candidate + true -> default + end + end + + defp normalize_bucket(_config, _), do: "" + + defp normalize_agg(%{downsample: true} = config, value) do + candidate = + value + |> safe_to_string() + |> String.trim() + |> String.downcase() + + default = safe_to_string(Map.get(config, :default_agg) || "avg") + + if candidate in @allowed_downsample_aggs, do: candidate, else: default + end + + defp normalize_agg(_config, _), do: "avg" + + defp normalize_series_field(%{downsample: true} = config, value) do + candidate = + value + |> safe_to_string() + |> String.trim() + + allowed = Map.get(config, :series_fields) || Map.get(config, "series_fields") + default = safe_to_string(Map.get(config, :default_series_field) || "") + + cond do + candidate == "" -> default + is_list(allowed) and candidate in allowed -> candidate + is_nil(allowed) -> candidate + true -> default + end + end + + defp 
normalize_series_field(_config, _), do: "" + + defp ensure_downsample_time(time, %{downsample: true} = config, bucket) do + if bucket != "" and time == "" do + safe_to_string(config.default_time || "last_24h") + else + time + end + end + + defp ensure_downsample_time(time, _config, _bucket), do: time + + defp normalize_sort_field(entity, field) when is_binary(field) do + field = String.trim(field) + + allowed = allowed_sort_fields(entity) + + cond do + is_list(allowed) and Enum.member?(allowed, field) -> field + field != "" and is_nil(allowed) -> field + true -> default_sort_field(entity) + end + end + + defp normalize_sort_field(entity, _), do: default_sort_field(entity) + + defp normalize_limit(limit) when is_integer(limit) and limit > 0, do: min(limit, @max_limit) + + defp normalize_limit(limit) when is_binary(limit) do + case Integer.parse(String.trim(limit)) do + {value, ""} -> normalize_limit(value) + _ -> 100 + end + end + + defp normalize_limit(_), do: 100 + + defp allowed_sort_fields(entity) do + case Catalog.entity(entity) do + %{id: id} when id in ["devices", "pollers"] -> + if id == "pollers" do + ["last_seen", "poller_id", "status", "agent_count", "checker_count"] + else + ["last_seen", "hostname", "ip", "device_id"] + end + + _ -> + nil + end + end + + defp allowed_search_fields(entity) do + case Catalog.entity(entity) do + %{filter_fields: []} -> nil + %{filter_fields: fields} when is_list(fields) -> fields + _ -> nil + end + end + + defp default_sort_field(entity) do + Catalog.entity(entity).default_sort_field + end + + defp default_search_field(entity) do + case Catalog.entity(entity).default_filter_field do + "" -> "field" + value -> value + end + end + + defp maybe_add_time(tokens, ""), do: tokens + defp maybe_add_time(tokens, nil), do: tokens + defp maybe_add_time(tokens, time), do: tokens ++ ["time:#{time}"] + + defp maybe_add_downsample(tokens, entity, time, bucket, agg, series) do + config = Catalog.entity(entity) + + cond do + not Map.get(config, :downsample, false) -> + tokens + + safe_to_string(bucket) |> String.trim() == "" -> + tokens + + safe_to_string(time) |> String.trim() == "" -> + tokens + + true -> + bucket = safe_to_string(bucket) |> String.trim() + agg = safe_to_string(agg) |> String.trim() |> String.downcase() + series = safe_to_string(series) |> String.trim() + + tokens = + tokens + |> Kernel.++(["bucket:#{bucket}"]) + |> Kernel.++(if agg != "", do: ["agg:#{agg}"], else: []) + + if series != "" do + tokens ++ ["series:#{series}"] + else + tokens + end + end + end + + defp maybe_add_sort(tokens, "", _dir), do: tokens + defp maybe_add_sort(tokens, nil, _dir), do: tokens + defp maybe_add_sort(tokens, field, dir), do: tokens ++ ["sort:#{field}:#{dir}"] + + defp maybe_add_filters(tokens, filters) when is_list(filters) do + Enum.reduce(filters, tokens, fn %{"field" => field, "op" => op, "value" => value}, acc -> + field = field |> safe_to_string() |> String.trim() + value = value |> safe_to_string() |> String.trim() + + if value == "" or field == "" do + acc + else + escaped = String.replace(value, " ", "\\ ") + + token = + case op do + "equals" -> "#{field}:#{escaped}" + "not_equals" -> "!#{field}:#{escaped}" + "not_contains" -> "!#{field}:%#{escaped}%" + _ -> "#{field}:%#{escaped}%" + end + + acc ++ [token] + end + end) + end + + defp stringify_map(%{} = map) do + Map.new(map, fn + {k, v} when is_atom(k) -> {Atom.to_string(k), v} + {k, v} -> {to_string(k), v} + end) + end + + defp parse_tokens(tokens) do + parts = %{ + entity: nil, + time: "", + bucket: 
"", + agg: "avg", + series: "", + sort_field: nil, + sort_dir: "desc", + limit: 100, + filters: [] + } + + Enum.reduce_while(tokens, {:ok, parts}, fn token, {:ok, acc} -> + cond do + String.starts_with?(token, "in:") -> + entity = String.replace_prefix(token, "in:", "") + {:cont, {:ok, %{acc | entity: entity}}} + + String.starts_with?(token, "time:") -> + time = String.replace_prefix(token, "time:", "") + {:cont, {:ok, %{acc | time: time}}} + + String.starts_with?(token, "bucket:") -> + bucket = String.replace_prefix(token, "bucket:", "") + {:cont, {:ok, %{acc | bucket: bucket}}} + + String.starts_with?(token, "agg:") -> + agg = String.replace_prefix(token, "agg:", "") + {:cont, {:ok, %{acc | agg: agg}}} + + String.starts_with?(token, "series:") -> + series = String.replace_prefix(token, "series:", "") + {:cont, {:ok, %{acc | series: series}}} + + String.starts_with?(token, "sort:") -> + sort = String.replace_prefix(token, "sort:", "") + + case String.split(sort, ":", parts: 2) do + [field, dir] -> + {:cont, {:ok, %{acc | sort_field: field, sort_dir: dir}}} + + _ -> + {:halt, {:error, :invalid_sort}} + end + + String.starts_with?(token, "limit:") -> + limit = String.replace_prefix(token, "limit:", "") + {:cont, {:ok, %{acc | limit: normalize_limit(limit)}}} + + true -> + case String.split(token, ":", parts: 2) do + [field, value] -> + {field, negated} = parse_filter_field(field) + value = String.trim(value) + {op, final_value} = parse_filter_value(negated, value) + + filter = %{ + "field" => String.downcase(field), + "op" => op, + "value" => final_value + } + + {:cont, {:ok, %{acc | filters: acc.filters ++ [filter]}}} + + _ -> + {:halt, {:error, :invalid_token}} + end + end + end) + |> case do + {:ok, %{entity: nil}} -> + {:error, :missing_entity} + + {:ok, %{sort_field: nil} = parts} -> + {:ok, %{parts | sort_field: default_sort_field(parts.entity)}} + + other -> + other + end + end + + defp unwrap_like("%" <> rest) do + rest + |> String.trim_trailing("%") + |> String.replace("\\ ", " ") + end + + defp unwrap_like(value), do: value + + defp parse_filter_field(field) when is_binary(field) do + field = String.trim(field) + + case String.starts_with?(field, "!") do + true -> {String.replace_prefix(field, "!", ""), true} + false -> {field, false} + end + end + + defp parse_filter_field(_), do: {"", false} + + defp parse_filter_value(negated, value) do + if String.contains?(value, "%") do + op = if negated, do: "not_contains", else: "contains" + {op, unwrap_like(value)} + else + op = if negated, do: "not_equals", else: "equals" + {op, String.replace(value, "\\ ", " ")} + end + end + + defp reject_unknown_tokens(tokens, parts) do + known_prefixes = ["in:", "time:", "bucket:", "agg:", "series:", "sort:", "limit:"] + + unknown = + Enum.reject(tokens, fn token -> + Enum.any?(known_prefixes, &String.starts_with?(token, &1)) or + Enum.any?(parts.filters, fn %{"field" => field} -> + String.starts_with?(token, field <> ":") or + String.starts_with?(token, "!" 
<> field <> ":") + end) + end) + + if unknown == [], do: :ok, else: {:error, {:unsupported_tokens, unknown}} + end + + defp validate_downsample(entity, bucket, agg, series) do + config = Catalog.entity(entity) + + if Map.get(config, :downsample, false) do + bucket = safe_to_string(bucket) |> String.trim() + agg = safe_to_string(agg) |> String.trim() |> String.downcase() + series = safe_to_string(series) |> String.trim() + + cond do + bucket == "" -> + :ok + + not Regex.match?(~r/^\d+(?:s|m|h|d)$/, bucket) -> + {:error, {:invalid_bucket, bucket}} + + agg != "" and agg not in @allowed_downsample_aggs -> + {:error, {:invalid_agg, agg}} + + true -> + allowed = Map.get(config, :series_fields) || Map.get(config, "series_fields") + + if series != "" and is_list(allowed) and series not in allowed do + {:error, {:unsupported_series_field, series}} + else + :ok + end + end + else + if safe_to_string(bucket) |> String.trim() != "" do + {:error, :downsample_not_supported} + else + :ok + end + end + end + + defp validate_filter_fields(entity, filters) when entity in ["devices", "pollers"] do + allowed = allowed_search_fields(entity) + + invalid = + filters + |> Enum.map(&Map.get(&1, "field")) + |> Enum.reject(&is_nil/1) + |> Enum.reject(&(&1 in allowed)) + + if invalid == [], do: :ok, else: {:error, {:unsupported_filter_fields, invalid}} + end + + defp validate_filter_fields(entity, filters) do + case allowed_search_fields(entity) do + nil -> + if entity == "" do + {:error, :missing_entity} + else + _ = filters + :ok + end + + allowed -> + invalid = + filters + |> Enum.map(&Map.get(&1, "field")) + |> Enum.reject(&is_nil/1) + |> Enum.reject(&(&1 in allowed)) + + if invalid == [], do: :ok, else: {:error, {:unsupported_filter_fields, invalid}} + end + end + + defp normalize_filters(entity, filters) when is_list(filters) do + filters + |> Enum.map(fn + %{"field" => field, "op" => op, "value" => value} -> + %{ + "field" => normalize_filter_field(entity, field), + "op" => normalize_filter_op(op), + "value" => value |> safe_to_string() + } + + %{} = other -> + %{ + "field" => normalize_filter_field(entity, Map.get(other, "field")), + "op" => normalize_filter_op(Map.get(other, "op")), + "value" => Map.get(other, "value", "") |> safe_to_string() + } + + other -> + %{ + "field" => default_search_field(entity), + "op" => "contains", + "value" => safe_to_string(other) + } + end) + end + + defp normalize_filters(entity, %{} = filters_by_index) do + filters_by_index + |> Enum.sort_by(fn {k, _} -> + case Integer.parse(to_string(k)) do + {i, ""} -> i + _ -> 0 + end + end) + |> Enum.map(fn {_k, v} -> v end) + |> then(&normalize_filters(entity, &1)) + end + + defp normalize_filters(entity, _), do: normalize_filters(entity, []) + + defp normalize_filter_field(entity, field) when is_binary(field) do + field = String.trim(field) + + allowed = allowed_search_fields(entity) + + cond do + is_list(allowed) and Enum.member?(allowed, field) -> field + field != "" and is_nil(allowed) -> field + true -> default_search_field(entity) + end + end + + defp normalize_filter_field(entity, _), do: default_search_field(entity) + + defp normalize_filter_op(op) when op in @allowed_filter_ops, do: op + defp normalize_filter_op(_), do: "contains" + + defp safe_to_string(nil), do: "" + defp safe_to_string(value) when is_binary(value), do: value + defp safe_to_string(value) when is_integer(value), do: Integer.to_string(value) + defp safe_to_string(value) when is_float(value), do: :erlang.float_to_binary(value) + defp safe_to_string(value) when 
is_atom(value), do: Atom.to_string(value) + + defp safe_to_string(value) when is_list(value) do + if Enum.all?(value, &is_integer/1) do + to_string(value) + else + inspect(value) + end + end + + defp safe_to_string(value), do: inspect(value) +end diff --git a/web-ng/lib/serviceradar_web_ng_web/srql/catalog.ex b/web-ng/lib/serviceradar_web_ng_web/srql/catalog.ex new file mode 100644 index 000000000..1d1515024 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/srql/catalog.ex @@ -0,0 +1,321 @@ +defmodule ServiceRadarWebNGWeb.SRQL.Catalog do + @moduledoc false + + @entities [ + %{ + id: "devices", + label: "Devices", + route: "/devices", + default_time: "", + default_sort_field: "last_seen", + default_sort_dir: "desc", + default_filter_field: "hostname", + filter_fields: ["hostname", "ip", "device_id", "poller_id", "agent_id"], + downsample: false + }, + %{ + id: "pollers", + label: "Pollers", + route: "/pollers", + default_time: "", + default_sort_field: "last_seen", + default_sort_dir: "desc", + default_filter_field: "poller_id", + filter_fields: ["poller_id", "status", "component_id", "registration_source"], + downsample: false + }, + %{ + id: "events", + label: "Events", + route: "/events", + default_time: "last_7d", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "event_type", + filter_fields: [ + "event_type", + "device_id", + "poller_id", + "agent_id", + "severity", + "source", + "message" + ], + downsample: false + }, + %{ + id: "logs", + label: "Logs", + route: "/logs", + default_time: "last_7d", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "message", + filter_fields: ["device_id", "poller_id", "agent_id", "severity", "source", "message"], + downsample: false + }, + %{ + id: "services", + label: "Services", + route: "/services", + default_time: "last_7d", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "service_type", + filter_fields: [ + "device_id", + "poller_id", + "service_type", + "service_status", + "name", + "port", + "protocol" + ], + downsample: false + }, + %{ + id: "interfaces", + label: "Interfaces", + route: "/interfaces", + default_time: "last_7d", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "device_id", + filter_fields: [ + "device_id", + "if_name", + "if_index", + "mac", + "ip_addresses", + "admin_status", + "oper_status" + ], + downsample: false + }, + %{ + id: "timeseries_metrics", + label: "Timeseries Metrics", + route: "/dashboard", + default_time: "last_24h", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "metric_name", + filter_fields: [ + "poller_id", + "agent_id", + "metric_name", + "metric_type", + "device_id", + "target_device_ip", + "partition", + "if_index" + ], + downsample: true, + default_bucket: "5m", + default_agg: "avg", + default_series_field: "metric_name", + series_fields: [ + "metric_name", + "metric_type", + "device_id", + "poller_id", + "agent_id", + "partition", + "target_device_ip", + "if_index" + ] + }, + %{ + id: "snmp_metrics", + label: "SNMP Metrics", + route: "/dashboard", + default_time: "last_24h", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "metric_name", + filter_fields: [ + "poller_id", + "agent_id", + "metric_name", + "device_id", + "target_device_ip", + "partition", + "if_index" + ], + downsample: true, + default_bucket: "5m", + default_agg: "avg", + default_series_field: 
"metric_name", + series_fields: [ + "metric_name", + "device_id", + "poller_id", + "agent_id", + "partition", + "target_device_ip", + "if_index" + ] + }, + %{ + id: "rperf_metrics", + label: "rPerf Metrics", + route: "/dashboard", + default_time: "last_24h", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "metric_name", + filter_fields: [ + "poller_id", + "agent_id", + "metric_name", + "device_id", + "target_device_ip", + "partition", + "if_index" + ], + downsample: true, + default_bucket: "5m", + default_agg: "avg", + default_series_field: "metric_name", + series_fields: [ + "metric_name", + "device_id", + "poller_id", + "agent_id", + "partition", + "target_device_ip", + "if_index" + ] + }, + %{ + id: "cpu_metrics", + label: "CPU Metrics", + route: "/dashboard", + default_time: "last_24h", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "device_id", + filter_fields: [ + "poller_id", + "agent_id", + "host_id", + "device_id", + "partition", + "cluster", + "label", + "core_id" + ], + downsample: true, + default_bucket: "5m", + default_agg: "avg", + default_series_field: "device_id", + series_fields: [ + "device_id", + "host_id", + "poller_id", + "agent_id", + "core_id", + "label", + "cluster", + "partition" + ] + }, + %{ + id: "memory_metrics", + label: "Memory Metrics", + route: "/dashboard", + default_time: "last_24h", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "device_id", + filter_fields: ["poller_id", "agent_id", "host_id", "device_id", "partition"], + downsample: true, + default_bucket: "5m", + default_agg: "avg", + default_series_field: "device_id", + series_fields: ["device_id", "host_id", "poller_id", "agent_id", "partition"] + }, + %{ + id: "disk_metrics", + label: "Disk Metrics", + route: "/dashboard", + default_time: "last_24h", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "device_id", + filter_fields: [ + "poller_id", + "agent_id", + "host_id", + "device_id", + "partition", + "mount_point", + "device_name" + ], + downsample: true, + default_bucket: "5m", + default_agg: "avg", + default_series_field: "mount_point", + series_fields: [ + "device_id", + "host_id", + "poller_id", + "agent_id", + "partition", + "mount_point", + "device_name" + ] + }, + %{ + id: "process_metrics", + label: "Process Metrics", + route: "/dashboard", + default_time: "last_24h", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "name", + filter_fields: [ + "poller_id", + "agent_id", + "host_id", + "device_id", + "partition", + "name", + "pid", + "status" + ], + downsample: true, + default_bucket: "5m", + default_agg: "avg", + default_series_field: "name", + series_fields: [ + "device_id", + "host_id", + "poller_id", + "agent_id", + "partition", + "name", + "pid", + "status" + ] + } + ] + + def entities, do: @entities + + def entity(id) when is_binary(id) do + Enum.find(@entities, &(&1.id == id)) || + %{ + id: id, + label: String.capitalize(id), + default_time: "", + default_sort_field: "timestamp", + default_sort_dir: "desc", + default_filter_field: "", + filter_fields: [], + downsample: false + } + end + + def entity(_), do: entity("devices") +end diff --git a/web-ng/lib/serviceradar_web_ng_web/srql/page.ex b/web-ng/lib/serviceradar_web_ng_web/srql/page.ex new file mode 100644 index 000000000..c55b5fdff --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/srql/page.ex @@ -0,0 +1,562 @@ +defmodule 
ServiceRadarWebNGWeb.SRQL.Page do + @moduledoc false + + alias ServiceRadarWebNGWeb.SRQL.Builder + alias ServiceRadarWebNGWeb.SRQL.Catalog + + def init(socket, entity, opts \\ []) when is_binary(entity) do + default_limit = Keyword.get(opts, :default_limit, 100) + builder_available = Keyword.get(opts, :builder_available, true) + + {builder_supported, builder_sync, builder, query} = + if builder_available do + builder = Builder.default_state(entity, default_limit) + {true, true, builder, Builder.build(builder)} + else + {false, false, %{}, default_query(entity, default_limit)} + end + + srql = %{ + enabled: true, + entity: entity, + page_path: nil, + query: query, + draft: query, + error: nil, + loading: false, + builder_available: builder_available, + builder_open: false, + builder_supported: builder_supported, + builder_sync: builder_sync, + builder: builder + } + + Phoenix.Component.assign(socket, :srql, srql) + end + + def load_list(socket, params, uri, list_assign_key, opts \\ []) when is_atom(list_assign_key) do + srql = Map.get(socket.assigns, :srql, %{}) + entity = srql_entity(srql, opts) + builder_available = Map.get(srql, :builder_available, false) + + default_limit = Keyword.get(opts, :default_limit, 20) + max_limit = Keyword.get(opts, :max_limit, 100) + limit_assign_key = Keyword.get(opts, :limit_assign_key, :limit) + + limit = parse_limit(Map.get(params, "limit"), default_limit, max_limit) + cursor = normalize_optional_string(Map.get(params, "cursor")) + + builder = + if builder_available do + base = + if Map.has_key?(params, "q") do + Map.get(srql, :builder, Builder.default_state(entity, limit)) + else + Builder.default_state(entity, limit) + end + + base + |> Map.put("entity", entity) + |> Map.put("limit", limit) + else + %{} + end + + default_query = + if builder_available do + Builder.build(builder) + else + default_query(entity, limit) + end + + query = normalize_query_param(Map.get(params, "q"), default_query) + + {builder_supported, builder_sync, builder_state} = + if builder_available do + case Builder.parse(query) do + {:ok, parsed} -> {true, true, parsed} + {:error, _} -> {false, false, builder} + end + else + {false, false, %{}} + end + + srql_module = srql_module() + + {results, error, viz_meta, pagination} = + case srql_module.query(query, %{cursor: cursor, limit: limit}) do + {:ok, %{"results" => results, "pagination" => pag} = resp} when is_list(results) -> + viz = + case Map.get(resp, "viz") do + value when is_map(value) -> value + _ -> nil + end + + {results, nil, viz, pag || %{}} + + {:ok, %{"results" => results} = resp} when is_list(results) -> + viz = + case Map.get(resp, "viz") do + value when is_map(value) -> value + _ -> nil + end + + {results, nil, viz, %{}} + + {:ok, other} -> + {[], "unexpected SRQL response: #{inspect(other)}", nil, %{}} + + {:error, reason} -> + {[], "SRQL error: #{format_error(reason)}", nil, %{}} + end + + page_path = uri |> normalize_uri() |> URI.parse() |> Map.get(:path) + + display_limit = + query + |> extract_limit_from_srql(limit, default_limit, max_limit) + + srql = + srql + |> Map.merge(%{ + enabled: true, + entity: entity, + page_path: page_path, + query: query, + draft: query, + error: error, + viz: viz_meta, + loading: false, + builder_available: builder_available, + builder_supported: builder_supported, + builder_sync: builder_sync, + builder: builder_state, + pagination: pagination + }) + + socket + |> Phoenix.Component.assign(:srql, srql) + |> Phoenix.Component.assign(limit_assign_key, display_limit) + |> 
Phoenix.Component.assign(list_assign_key, results) + end + + defp normalize_optional_string(nil), do: nil + defp normalize_optional_string(""), do: nil + defp normalize_optional_string(value) when is_binary(value), do: value + defp normalize_optional_string(_), do: nil + + def handle_event(socket, event, params, opts \\ []) + + def handle_event(socket, "srql_change", params, _opts) do + case normalize_param_to_string(extract_param(params, "q")) do + nil -> + socket + + query -> + srql = update_srql(socket, &Map.put(&1, :draft, query)) + Phoenix.Component.assign(socket, :srql, srql) + end + end + + def handle_event(socket, "srql_submit", params, opts) do + srql = Map.get(socket.assigns, :srql, %{}) + fallback_path = Keyword.get(opts, :fallback_path) || "/" + extra_params = normalize_extra_params(Keyword.get(opts, :extra_params, %{})) + + raw_query = normalize_param_to_string(extract_param(params, "q")) || "" + query = raw_query |> String.trim() + query = if query == "", do: to_string(srql[:query] || ""), else: query + + limit_assign_key = Keyword.get(opts, :limit_assign_key, :limit) + limit = Map.get(socket.assigns, limit_assign_key) + + # Extract entity from query and determine the target route + target_path = entity_route_from_query(query, fallback_path) + current_path = srql[:page_path] || fallback_path + + nav_params = Map.merge(extra_params, %{"q" => query, "limit" => limit}) + + socket + |> Phoenix.Component.assign(:srql, Map.put(srql, :builder_open, false)) + |> navigate_to_path(target_path, current_path, nav_params) + end + + def handle_event(socket, "srql_builder_toggle", _params, opts) do + srql = Map.get(socket.assigns, :srql, %{}) + + if not Map.get(srql, :builder_available, false) do + Phoenix.Component.assign(socket, :srql, Map.put(srql, :builder_open, false)) + else + if Map.get(srql, :builder_open, false) do + Phoenix.Component.assign(socket, :srql, Map.put(srql, :builder_open, false)) + else + entity = srql_entity(srql, opts) + limit_assign_key = Keyword.get(opts, :limit_assign_key, :limit) + limit = Map.get(socket.assigns, limit_assign_key, 100) + + current = srql[:draft] || srql[:query] || "" + current = normalize_param_to_string(current) || "" + + {supported, sync, builder} = + case Builder.parse(current) do + {:ok, builder} -> + {true, true, builder} + + {:error, _reason} -> + {false, false, Builder.default_state(entity, limit)} + end + + updated = + srql + |> Map.put(:builder_open, true) + |> Map.put(:builder_supported, supported) + |> Map.put(:builder_sync, sync) + |> Map.put(:builder, builder) + + Phoenix.Component.assign(socket, :srql, updated) + end + end + end + + def handle_event(socket, "srql_builder_change", params, _opts) do + srql = Map.get(socket.assigns, :srql, %{}) + + if not Map.get(srql, :builder_available, false) do + Phoenix.Component.assign(socket, :srql, srql) + else + builder_params = + case extract_param(params, "builder") do + %{} = v -> v + _ -> %{} + end + + builder = Builder.update(Map.get(srql, :builder, %{}), builder_params) + + updated = Map.put(srql, :builder, builder) + + updated = + if updated[:builder_supported] and updated[:builder_sync] do + Map.put(updated, :draft, Builder.build(builder)) + else + updated + end + + Phoenix.Component.assign(socket, :srql, updated) + end + end + + def handle_event(socket, "srql_builder_add_filter", _params, opts) do + srql = Map.get(socket.assigns, :srql, %{}) + + if not Map.get(srql, :builder_available, false) do + Phoenix.Component.assign(socket, :srql, srql) + else + entity = 
current_builder_entity(srql, opts) + builder = Map.get(srql, :builder, Builder.default_state(entity)) + + filters = + builder + |> Map.get("filters", []) + |> List.wrap() + + next = %{ + "field" => default_filter_field(entity, filters), + "op" => "contains", + "value" => "" + } + + updated_builder = Map.put(builder, "filters", filters ++ [next]) + + updated = + srql + |> Map.put(:builder, updated_builder) + |> maybe_sync_builder_to_draft() + + Phoenix.Component.assign(socket, :srql, updated) + end + end + + def handle_event(socket, "srql_builder_remove_filter", params, opts) do + srql = Map.get(socket.assigns, :srql, %{}) + + if not Map.get(srql, :builder_available, false) do + Phoenix.Component.assign(socket, :srql, srql) + else + entity = current_builder_entity(srql, opts) + builder = Map.get(srql, :builder, Builder.default_state(entity)) + + filters = + builder + |> Map.get("filters", []) + |> List.wrap() + + idx = extract_param(params, "idx") + raw_idx = normalize_param_to_string(idx) || "" + + index = + case Integer.parse(raw_idx) do + {i, ""} -> i + _ -> -1 + end + + updated_filters = + filters + |> Enum.with_index() + |> Enum.reject(fn {_f, i} -> i == index end) + |> Enum.map(fn {f, _i} -> f end) + + updated_builder = Map.put(builder, "filters", updated_filters) + + updated = + srql + |> Map.put(:builder, updated_builder) + |> maybe_sync_builder_to_draft() + + Phoenix.Component.assign(socket, :srql, updated) + end + end + + def handle_event(socket, "srql_builder_apply", _params, _opts) do + srql = Map.get(socket.assigns, :srql, %{}) + + if not Map.get(srql, :builder_available, false) do + Phoenix.Component.assign(socket, :srql, srql) + else + builder = Map.get(srql, :builder, %{}) + query = Builder.build(builder) + + updated = + srql + |> Map.put(:builder_supported, true) + |> Map.put(:builder_sync, true) + |> Map.put(:draft, query) + + Phoenix.Component.assign(socket, :srql, updated) + end + end + + def handle_event(socket, "srql_builder_run", _params, opts) do + srql = Map.get(socket.assigns, :srql, %{}) + fallback_path = Keyword.get(opts, :fallback_path) || "/" + extra_params = normalize_extra_params(Keyword.get(opts, :extra_params, %{})) + + if not Map.get(srql, :builder_available, false) do + socket + else + # Build query from current builder state + builder = Map.get(srql, :builder, %{}) + query = Builder.build(builder) + + limit_assign_key = Keyword.get(opts, :limit_assign_key, :limit) + limit = Map.get(socket.assigns, limit_assign_key) + + # Extract entity from builder and determine the target route + queried_entity = Map.get(builder, "entity", "devices") + target_path = Catalog.entity(queried_entity)[:route] || fallback_path + current_path = srql[:page_path] || fallback_path + + nav_params = Map.merge(extra_params, %{"q" => query, "limit" => limit}) + + # Close builder and navigate with the new query + socket + |> Phoenix.Component.assign(:srql, Map.put(srql, :builder_open, false)) + |> navigate_to_path(target_path, current_path, nav_params) + end + end + + def handle_event(socket, _event, _params, _opts), do: socket + + # Extracts entity from SRQL query and returns the appropriate route + defp entity_route_from_query(query, fallback_path) when is_binary(query) do + case Regex.run(~r/(?:^|\s)in:(\S+)/, query) do + [_, entity] -> + Catalog.entity(entity)[:route] || fallback_path + + _ -> + fallback_path + end + end + + defp entity_route_from_query(_query, fallback_path), do: fallback_path + + # Navigates to target path - uses push_patch if same path, push_navigate if 
different + defp navigate_to_path(socket, target_path, current_path, params) do + url = target_path <> "?" <> URI.encode_query(params) + + if target_path == current_path do + Phoenix.LiveView.push_patch(socket, to: url) + else + Phoenix.LiveView.push_navigate(socket, to: url) + end + end + + defp normalize_extra_params(%{} = params) do + params + |> Enum.reduce(%{}, fn + {k, v}, acc when is_atom(k) -> Map.put(acc, Atom.to_string(k), v) + {k, v}, acc when is_binary(k) -> Map.put(acc, k, v) + _, acc -> acc + end) + |> Map.reject(fn {_k, v} -> is_nil(v) or v == "" end) + end + + defp normalize_extra_params(_), do: %{} + + defp srql_entity(srql, opts) do + case Map.get(srql, :entity) || Keyword.get(opts, :entity) do + value when is_binary(value) and value != "" -> value + _ -> "devices" + end + end + + defp update_srql(socket, fun) do + socket.assigns + |> Map.get(:srql, %{}) + |> fun.() + end + + defp extract_param(%{} = params, key) when is_binary(key) do + case key do + "q" -> Map.get(params, "q") || Map.get(params, :q) + "builder" -> Map.get(params, "builder") || Map.get(params, :builder) + "idx" -> Map.get(params, "idx") || Map.get(params, :idx) + _ -> Map.get(params, key) + end + end + + defp extract_param(_params, _key), do: nil + + defp normalize_param_to_string(nil), do: nil + defp normalize_param_to_string(value) when is_binary(value), do: value + + defp normalize_param_to_string([first | _]) when is_binary(first), do: first + + defp normalize_param_to_string(value) when is_list(value) do + if Enum.all?(value, &is_integer/1) do + to_string(value) + else + inspect(value) + end + end + + defp normalize_param_to_string(value) when is_integer(value), do: Integer.to_string(value) + defp normalize_param_to_string(value) when is_float(value), do: :erlang.float_to_binary(value) + defp normalize_param_to_string(value) when is_atom(value), do: Atom.to_string(value) + defp normalize_param_to_string(value) when is_map(value), do: inspect(value) + defp normalize_param_to_string(value), do: inspect(value) + + defp normalize_query_param(value, default_query) do + case normalize_param_to_string(value) do + nil -> + default_query + + query -> + query + |> String.trim() + |> case do + "" -> default_query + other -> String.slice(other, 0, 4000) + end + end + end + + defp normalize_uri(uri) when is_binary(uri), do: uri + defp normalize_uri(%URI{} = uri), do: URI.to_string(uri) + defp normalize_uri(nil), do: "" + defp normalize_uri(other), do: inspect(other) + + defp maybe_sync_builder_to_draft(srql) do + if srql[:builder_supported] and srql[:builder_sync] do + Map.put(srql, :draft, Builder.build(srql[:builder] || %{})) + else + srql + end + end + + defp srql_module do + Application.get_env(:serviceradar_web_ng, :srql_module, ServiceRadarWebNG.SRQL) + end + + defp parse_limit(nil, default, _max), do: default + + defp parse_limit(limit, default, max) when is_binary(limit) do + case Integer.parse(limit) do + {value, ""} -> parse_limit(value, default, max) + _ -> default + end + end + + defp parse_limit(limit, _default, max) when is_integer(limit) and limit > 0 do + min(limit, max) + end + + defp parse_limit(_limit, default, _max), do: default + + defp extract_limit_from_srql(query, fallback, default, max) when is_binary(query) do + case Regex.run(~r/(?:^|\s)limit:(\d+)(?:\s|$)/, query) do + [_, raw] -> parse_limit(raw, default, max) + _ -> fallback + end + end + + defp extract_limit_from_srql(_query, fallback, _default, _max), do: fallback + + defp format_error(%Jason.DecodeError{} = err), do: 
Exception.message(err) + defp format_error(%ArgumentError{} = err), do: Exception.message(err) + defp format_error(reason) when is_binary(reason), do: reason + defp format_error(reason), do: inspect(reason) + + defp default_filter_field(entity, _filters) do + Catalog.entity(entity).default_filter_field + end + + defp current_builder_entity(srql, opts) do + candidate = + srql + |> Map.get(:builder, %{}) + |> Map.get("entity") + |> normalize_param_to_string() + |> case do + nil -> "" + value -> value + end + |> String.trim() + + if candidate != "" do + candidate + else + srql_entity(srql, opts) + end + end + + defp default_query(entity, limit) do + limit = parse_limit(limit, 100, 500) + + tokens = + ["in:#{entity}"] + |> maybe_add_default_time(entity) + |> Kernel.++(["limit:#{limit}"]) + + Enum.join(tokens, " ") + end + + defp maybe_add_default_time(tokens, entity) do + if entity in [ + "events", + "logs", + "device_updates", + "otel_metrics", + "timeseries_metrics", + "snmp_metrics", + "rperf_metrics", + "cpu_metrics", + "memory_metrics", + "disk_metrics", + "process_metrics" + ] do + tokens ++ ["time:last_7d"] + else + tokens + end + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/srql/viz.ex b/web-ng/lib/serviceradar_web_ng_web/srql/viz.ex new file mode 100644 index 000000000..99500a16a --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/srql/viz.ex @@ -0,0 +1,173 @@ +defmodule ServiceRadarWebNGWeb.SRQL.Viz do + @moduledoc false + + @type inferred :: + :none + | {:timeseries, %{x: String.t(), y: String.t(), points: list({DateTime.t(), number()})}} + | {:categories, + %{label: String.t(), value: String.t(), items: list({String.t(), number()})}} + + @max_points 120 + @max_categories 12 + + def infer(rows) when is_list(rows) do + rows = Enum.filter(rows, &is_map/1) + + with {:ok, inferred} <- infer_timeseries(rows) do + inferred + else + _ -> + case infer_categories(rows) do + {:ok, inferred} -> inferred + _ -> :none + end + end + end + + def infer(_), do: :none + + defp infer_timeseries([]), do: {:error, :no_rows} + + defp infer_timeseries([first | _] = rows) do + keys = Map.keys(first) |> Enum.map(&to_string/1) + + x_key = + Enum.find(keys, fn k -> + k in ["timestamp", "ts", "time", "bucket", "inserted_at", "observed_at"] + end) + + y_key = + Enum.find(keys, fn k -> + k in ["value", "count", "avg", "min", "max", "p95", "p99"] + end) || Enum.find(keys, &numeric_column?(rows, &1)) + + with true <- is_binary(x_key), + true <- is_binary(y_key), + points when is_list(points) and points != [] <- extract_points(rows, x_key, y_key) do + {:ok, {:timeseries, %{x: x_key, y: y_key, points: points}}} + else + _ -> {:error, :no_timeseries} + end + end + + defp infer_categories([]), do: {:error, :no_rows} + + defp infer_categories([first | _] = rows) do + keys = Map.keys(first) |> Enum.map(&to_string/1) + + value_key = + Enum.find(keys, fn k -> + k in ["count", "value"] + end) || Enum.find(keys, &numeric_column?(rows, &1)) + + label_key = + keys + |> Enum.reject(&(&1 == value_key)) + |> Enum.find(&stringish_column?(rows, &1)) + + with true <- is_binary(label_key), + true <- is_binary(value_key), + items when is_list(items) and items != [] <- + extract_categories(rows, label_key, value_key) do + {:ok, {:categories, %{label: label_key, value: value_key, items: items}}} + else + _ -> {:error, :no_categories} + end + end + + defp extract_points(rows, x_key, y_key) do + rows + |> Enum.take(@max_points) + |> Enum.reduce([], fn row, acc -> + with {:ok, dt} <- parse_datetime(Map.get(row, x_key)), + 
{:ok, y} <- parse_number(Map.get(row, y_key)) do + [{dt, y} | acc] + else + _ -> acc + end + end) + |> Enum.reverse() + end + + defp extract_categories(rows, label_key, value_key) do + rows + |> Enum.reduce(%{}, fn row, acc -> + with label when is_binary(label) <- to_string(Map.get(row, label_key) || ""), + true <- label != "", + {:ok, value} <- parse_number(Map.get(row, value_key)) do + Map.update(acc, label, value, &(&1 + value)) + else + _ -> acc + end + end) + |> Enum.sort_by(fn {_k, v} -> -v end) + |> Enum.take(@max_categories) + end + + defp numeric_column?(rows, key) do + Enum.any?(rows, fn row -> + case Map.get(row, key) do + v when is_integer(v) or is_float(v) -> + true + + v when is_binary(v) -> + match?({_, ""}, Float.parse(v)) or match?({_, ""}, Integer.parse(v)) + + _ -> + false + end + end) + end + + defp stringish_column?(rows, key) do + Enum.any?(rows, fn row -> + v = Map.get(row, key) + is_binary(v) and byte_size(v) > 0 + end) + end + + defp parse_number(value) when is_integer(value), do: {:ok, value * 1.0} + defp parse_number(value) when is_float(value), do: {:ok, value} + + defp parse_number(value) when is_binary(value) do + value = String.trim(value) + + cond do + value == "" -> + {:error, :empty} + + match?({_, ""}, Float.parse(value)) -> + {v, ""} = Float.parse(value) + {:ok, v} + + match?({_, ""}, Integer.parse(value)) -> + {v, ""} = Integer.parse(value) + {:ok, v * 1.0} + + true -> + {:error, :nan} + end + end + + defp parse_number(_), do: {:error, :not_numeric} + + defp parse_datetime(%DateTime{} = dt), do: {:ok, dt} + + defp parse_datetime(%NaiveDateTime{} = ndt) do + {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + end + + defp parse_datetime(value) when is_binary(value) do + value = String.trim(value) + + with {:error, _} <- DateTime.from_iso8601(value), + {:ok, ndt} <- NaiveDateTime.from_iso8601(value) do + {:ok, DateTime.from_naive!(ndt, "Etc/UTC")} + else + {:ok, dt, _offset} -> {:ok, dt} + {:error, _} -> {:error, :invalid_datetime} + end + end + + defp parse_datetime(_), do: {:error, :not_datetime} +end diff --git a/web-ng/lib/serviceradar_web_ng_web/telemetry.ex b/web-ng/lib/serviceradar_web_ng_web/telemetry.ex new file mode 100644 index 000000000..cbe779de9 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/telemetry.ex @@ -0,0 +1,93 @@ +defmodule ServiceRadarWebNGWeb.Telemetry do + use Supervisor + import Telemetry.Metrics + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg, name: __MODULE__) + end + + @impl true + def init(_arg) do + children = [ + # Telemetry poller will execute the given period measurements + # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics + {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} + # Add reporters as children of your supervision tree. 
+ # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + def metrics do + [ + # Phoenix Metrics + summary("phoenix.endpoint.start.system_time", + unit: {:native, :millisecond} + ), + summary("phoenix.endpoint.stop.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.start.system_time", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.exception.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.stop.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.socket_connected.duration", + unit: {:native, :millisecond} + ), + sum("phoenix.socket_drain.count"), + summary("phoenix.channel_joined.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_handled_in.duration", + tags: [:event], + unit: {:native, :millisecond} + ), + + # Database Metrics + summary("serviceradar_web_ng.repo.query.total_time", + unit: {:native, :millisecond}, + description: "The sum of the other measurements" + ), + summary("serviceradar_web_ng.repo.query.decode_time", + unit: {:native, :millisecond}, + description: "The time spent decoding the data received from the database" + ), + summary("serviceradar_web_ng.repo.query.query_time", + unit: {:native, :millisecond}, + description: "The time spent executing the query" + ), + summary("serviceradar_web_ng.repo.query.queue_time", + unit: {:native, :millisecond}, + description: "The time spent waiting for a database connection" + ), + summary("serviceradar_web_ng.repo.query.idle_time", + unit: {:native, :millisecond}, + description: + "The time the connection spent waiting before being checked out for the query" + ), + + # VM Metrics + summary("vm.memory.total", unit: {:byte, :kilobyte}), + summary("vm.total_run_queue_lengths.total"), + summary("vm.total_run_queue_lengths.cpu"), + summary("vm.total_run_queue_lengths.io") + ] + end + + defp periodic_measurements do + [ + # A module, function and arguments to be invoked periodically. + # This function must call :telemetry.execute/3 and a metric must be added above. + # {ServiceRadarWebNGWeb, :count_users, []} + ] + end +end diff --git a/web-ng/lib/serviceradar_web_ng_web/user_auth.ex b/web-ng/lib/serviceradar_web_ng_web/user_auth.ex new file mode 100644 index 000000000..8783f1ed4 --- /dev/null +++ b/web-ng/lib/serviceradar_web_ng_web/user_auth.ex @@ -0,0 +1,287 @@ +defmodule ServiceRadarWebNGWeb.UserAuth do + use ServiceRadarWebNGWeb, :verified_routes + + import Plug.Conn + import Phoenix.Controller + + alias ServiceRadarWebNG.Accounts + alias ServiceRadarWebNG.Accounts.Scope + + # Make the remember me cookie valid for 14 days. This should match + # the session validity setting in UserToken. + @max_cookie_age_in_days 14 + @remember_me_cookie "_service_radar_web_ng_web_user_remember_me" + @remember_me_options [ + sign: true, + max_age: @max_cookie_age_in_days * 24 * 60 * 60, + same_site: "Lax" + ] + + # How old the session token should be before a new one is issued. When a request is made + # with a session token older than this value, then a new session token will be created + # and the session and remember-me cookies (if set) will be updated with the new token. + # Lowering this value will result in more tokens being created by active users. Increasing + # it will result in less time before a session token expires for a user to get issued a new + # token. 
This can be set to a value greater than `@max_cookie_age_in_days` to disable + # the reissuing of tokens completely. + @session_reissue_age_in_days 7 + + @doc """ + Logs the user in. + + Redirects to the session's `:user_return_to` path + or falls back to the `signed_in_path/1`. + """ + def log_in_user(conn, user, params \\ %{}) do + user_return_to = get_session(conn, :user_return_to) + + conn + |> create_or_extend_session(user, params) + |> redirect(to: user_return_to || signed_in_path(conn)) + end + + @doc """ + Logs the user out. + + It clears all session data for safety. See renew_session. + """ + def log_out_user(conn) do + user_token = get_session(conn, :user_token) + user_token && Accounts.delete_user_session_token(user_token) + + if live_socket_id = get_session(conn, :live_socket_id) do + ServiceRadarWebNGWeb.Endpoint.broadcast(live_socket_id, "disconnect", %{}) + end + + conn + |> renew_session(nil) + |> delete_resp_cookie(@remember_me_cookie) + |> redirect(to: ~p"/") + end + + @doc """ + Authenticates the user by looking into the session and remember me token. + + Will reissue the session token if it is older than the configured age. + """ + def fetch_current_scope_for_user(conn, _opts) do + with {token, conn} <- ensure_user_token(conn), + {user, token_inserted_at} <- Accounts.get_user_by_session_token(token) do + conn + |> assign(:current_scope, Scope.for_user(user)) + |> maybe_reissue_user_session_token(user, token_inserted_at) + else + nil -> assign(conn, :current_scope, Scope.for_user(nil)) + end + end + + defp ensure_user_token(conn) do + if token = get_session(conn, :user_token) do + {token, conn} + else + conn = fetch_cookies(conn, signed: [@remember_me_cookie]) + + if token = conn.cookies[@remember_me_cookie] do + {token, conn |> put_token_in_session(token) |> put_session(:user_remember_me, true)} + else + nil + end + end + end + + # Reissue the session token if it is older than the configured reissue age. + defp maybe_reissue_user_session_token(conn, user, token_inserted_at) do + token_age = DateTime.diff(DateTime.utc_now(:second), token_inserted_at, :day) + + if token_age >= @session_reissue_age_in_days do + create_or_extend_session(conn, user, %{}) + else + conn + end + end + + # This function is the one responsible for creating session tokens + # and storing them safely in the session and cookies. It may be called + # either when logging in, during sudo mode, or to renew a session which + # will soon expire. + # + # When the session is created, rather than extended, the renew_session + # function will clear the session to avoid fixation attacks. See the + # renew_session function to customize this behaviour. + defp create_or_extend_session(conn, user, params) do + token = Accounts.generate_user_session_token(user) + remember_me = get_session(conn, :user_remember_me) + + conn + |> renew_session(user) + |> put_token_in_session(token) + |> maybe_write_remember_me_cookie(token, params, remember_me) + end + + # Do not renew session if the user is already logged in + # to prevent CSRF errors or data being lost in tabs that are still open + defp renew_session(conn, user) when conn.assigns.current_scope.user.id == user.id do + conn + end + + # This function renews the session ID and erases the whole + # session to avoid fixation attacks. 
If there is any data + # in the session you may want to preserve after log in/log out, + # you must explicitly fetch the session data before clearing + # and then immediately set it after clearing, for example: + # + # defp renew_session(conn, _user) do + # delete_csrf_token() + # preferred_locale = get_session(conn, :preferred_locale) + # + # conn + # |> configure_session(renew: true) + # |> clear_session() + # |> put_session(:preferred_locale, preferred_locale) + # end + # + defp renew_session(conn, _user) do + delete_csrf_token() + + conn + |> configure_session(renew: true) + |> clear_session() + end + + defp maybe_write_remember_me_cookie(conn, token, %{"remember_me" => "true"}, _), + do: write_remember_me_cookie(conn, token) + + defp maybe_write_remember_me_cookie(conn, token, _params, true), + do: write_remember_me_cookie(conn, token) + + defp maybe_write_remember_me_cookie(conn, _token, _params, _), do: conn + + defp write_remember_me_cookie(conn, token) do + conn + |> put_session(:user_remember_me, true) + |> put_resp_cookie(@remember_me_cookie, token, @remember_me_options) + end + + defp put_token_in_session(conn, token) do + conn + |> put_session(:user_token, token) + |> put_session(:live_socket_id, user_session_topic(token)) + end + + @doc """ + Disconnects existing sockets for the given tokens. + """ + def disconnect_sessions(tokens) do + Enum.each(tokens, fn %{token: token} -> + ServiceRadarWebNGWeb.Endpoint.broadcast(user_session_topic(token), "disconnect", %{}) + end) + end + + defp user_session_topic(token), do: "users_sessions:#{Base.url_encode64(token)}" + + @doc """ + Handles mounting and authenticating the current_scope in LiveViews. + + ## `on_mount` arguments + + * `:mount_current_scope` - Assigns current_scope + to socket assigns based on user_token, or nil if + there's no user_token or no matching user. + + * `:require_authenticated` - Authenticates the user from the session, + and assigns the current_scope to socket assigns based + on user_token. + Redirects to login page if there's no logged user. + + ## Examples + + Use the `on_mount` lifecycle macro in LiveViews to mount or authenticate + the `current_scope`: + + defmodule ServiceRadarWebNGWeb.PageLive do + use ServiceRadarWebNGWeb, :live_view + + on_mount {ServiceRadarWebNGWeb.UserAuth, :mount_current_scope} + ... 
+ end + + Or use the `live_session` of your router to invoke the on_mount callback: + + live_session :authenticated, on_mount: [{ServiceRadarWebNGWeb.UserAuth, :require_authenticated}] do + live "/profile", ProfileLive, :index + end + """ + def on_mount(:mount_current_scope, _params, session, socket) do + {:cont, mount_current_scope(socket, session)} + end + + def on_mount(:require_authenticated, _params, session, socket) do + socket = mount_current_scope(socket, session) + + if socket.assigns.current_scope && socket.assigns.current_scope.user do + {:cont, socket} + else + socket = + socket + |> Phoenix.LiveView.put_flash(:error, "You must log in to access this page.") + |> Phoenix.LiveView.redirect(to: ~p"/users/log-in") + + {:halt, socket} + end + end + + def on_mount(:require_sudo_mode, _params, session, socket) do + socket = mount_current_scope(socket, session) + + if Accounts.sudo_mode?(socket.assigns.current_scope.user, -10) do + {:cont, socket} + else + socket = + socket + |> Phoenix.LiveView.put_flash(:error, "You must re-authenticate to access this page.") + |> Phoenix.LiveView.redirect(to: ~p"/users/log-in") + + {:halt, socket} + end + end + + defp mount_current_scope(socket, session) do + Phoenix.Component.assign_new(socket, :current_scope, fn -> + {user, _} = + if user_token = session["user_token"] do + Accounts.get_user_by_session_token(user_token) + end || {nil, nil} + + Scope.for_user(user) + end) + end + + @doc "Returns the path to redirect to after log in." + # the user was already logged in, redirect to dashboard + def signed_in_path(%Plug.Conn{assigns: %{current_scope: %Scope{user: %Accounts.User{}}}}) do + ~p"/dashboard" + end + + def signed_in_path(_), do: ~p"/" + + @doc """ + Plug for routes that require the user to be authenticated. + """ + def require_authenticated_user(conn, _opts) do + if conn.assigns.current_scope && conn.assigns.current_scope.user do + conn + else + conn + |> put_flash(:error, "You must log in to access this page.") + |> maybe_store_return_to() + |> redirect(to: ~p"/users/log-in") + |> halt() + end + end + + defp maybe_store_return_to(%{method: "GET"} = conn) do + put_session(conn, :user_return_to, current_path(conn)) + end + + defp maybe_store_return_to(conn), do: conn +end diff --git a/web-ng/mix.exs b/web-ng/mix.exs new file mode 100644 index 000000000..64d9747ba --- /dev/null +++ b/web-ng/mix.exs @@ -0,0 +1,97 @@ +defmodule ServiceRadarWebNG.MixProject do + use Mix.Project + + def project do + [ + app: :serviceradar_web_ng, + version: "0.1.0", + elixir: "~> 1.15", + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + aliases: aliases(), + deps: deps(), + compilers: [:phoenix_live_view] ++ Mix.compilers(), + listeners: [Phoenix.CodeReloader] + ] + end + + # Configuration for the OTP application. + # + # Type `mix help compile.app` for more information. + def application do + [ + mod: {ServiceRadarWebNG.Application, []}, + extra_applications: [:logger, :runtime_tools] + ] + end + + def cli do + [ + preferred_envs: [precommit: :test] + ] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + # Specifies your project dependencies. + # + # Type `mix help deps` for examples and options. 
+ defp deps do + [ + {:bcrypt_elixir, "~> 3.0"}, + {:phoenix, "~> 1.8.3"}, + {:phoenix_ecto, "~> 4.5"}, + {:ecto_sql, "~> 3.13"}, + {:postgrex, ">= 0.0.0"}, + {:phoenix_html, "~> 4.1"}, + {:phoenix_live_reload, "~> 1.2", only: :dev}, + {:phoenix_live_view, "~> 1.1.0"}, + {:stream_data, "~> 1.1", only: :test}, + {:lazy_html, ">= 0.1.0", only: :test}, + {:phoenix_live_dashboard, "~> 0.8.3"}, + {:esbuild, "~> 0.10", runtime: Mix.env() == :dev}, + {:tailwind, "~> 0.3", runtime: Mix.env() == :dev}, + {:heroicons, + github: "tailwindlabs/heroicons", + tag: "v2.2.0", + sparse: "optimized", + app: false, + compile: false, + depth: 1}, + {:swoosh, "~> 1.16"}, + {:req, "~> 0.5"}, + {:rustler, "~> 0.36"}, + {:telemetry_metrics, "~> 1.0"}, + {:telemetry_poller, "~> 1.0"}, + {:gettext, "~> 1.0"}, + {:jason, "~> 1.2"}, + {:dns_cluster, "~> 0.2.0"}, + {:bandit, "~> 1.5"} + ] + end + + # Aliases are shortcuts or tasks specific to the current project. + # For example, to install project dependencies and perform other setup tasks, run: + # + # $ mix setup + # + # See the documentation for `Mix` for more info on aliases. + defp aliases do + [ + setup: ["deps.get", "ecto.setup", "assets.setup", "assets.build"], + "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"], + "ecto.reset": ["ecto.drop", "ecto.setup"], + test: ["ecto.migrate --quiet", "test"], + "assets.setup": ["tailwind.install --if-missing", "esbuild.install --if-missing"], + "assets.build": ["compile", "tailwind serviceradar_web_ng", "esbuild serviceradar_web_ng"], + "assets.deploy": [ + "tailwind serviceradar_web_ng --minify", + "esbuild serviceradar_web_ng --minify", + "phx.digest" + ], + precommit: ["compile --warnings-as-errors", "deps.unlock --unused", "format", "test"] + ] + end +end diff --git a/web-ng/mix.lock b/web-ng/mix.lock new file mode 100644 index 000000000..70588d445 --- /dev/null +++ b/web-ng/mix.lock @@ -0,0 +1,50 @@ +%{ + "bandit": {:hex, :bandit, "1.9.0", "6dc1ff2c30948dfecf32db574cc3447c7b9d70e0b61140098df3818870b01b76", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "2538aaa1663b40ca9cbd8ca1f8a540cb49e5baf34c6ffef068369cc45f9146f2"}, + "bcrypt_elixir": {:hex, :bcrypt_elixir, "3.3.2", "d50091e3c9492d73e17fc1e1619a9b09d6a5ef99160eb4d736926fd475a16ca3", [:make, :mix], [{:comeonin, "~> 5.3", [hex: :comeonin, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "471be5151874ae7931911057d1467d908955f93554f7a6cd1b7d804cac8cef53"}, + "cc_precompiler": {:hex, :cc_precompiler, "0.1.11", "8c844d0b9fb98a3edea067f94f616b3f6b29b959b6b3bf25fee94ffe34364768", [:mix], [{:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "3427232caf0835f94680e5bcf082408a70b48ad68a5f5c0b02a3bea9f3a075b9"}, + "comeonin": {:hex, :comeonin, "5.5.1", "5113e5f3800799787de08a6e0db307133850e635d34e9fab23c70b6501669510", [:mix], [], "hexpm", "65aac8f19938145377cee73973f192c5645873dcf550a8a6b18187d17c13ccdb"}, + "db_connection": {:hex, :db_connection, "2.8.1", "9abdc1e68c34c6163f6fb96a96532272d13ad7ca45262156ae8b7ec6d9dc4bec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", 
optional: false]}], "hexpm", "a61a3d489b239d76f326e03b98794fb8e45168396c925ef25feb405ed09da8fd"}, + "decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"}, + "dns_cluster": {:hex, :dns_cluster, "0.2.0", "aa8eb46e3bd0326bd67b84790c561733b25c5ba2fe3c7e36f28e88f384ebcb33", [:mix], [], "hexpm", "ba6f1893411c69c01b9e8e8f772062535a4cf70f3f35bcc964a324078d8c8240"}, + "ecto": {:hex, :ecto, "3.13.5", "9d4a69700183f33bf97208294768e561f5c7f1ecf417e0fa1006e4a91713a834", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "df9efebf70cf94142739ba357499661ef5dbb559ef902b68ea1f3c1fabce36de"}, + "ecto_sql": {:hex, :ecto_sql, "3.13.3", "81f7067dd1951081888529002dbc71f54e5e891b69c60195040ea44697e1104a", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.13.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "5751caea36c8f5dd0d1de6f37eceffea19d10bd53f20e5bbe31c45f2efc8944a"}, + "elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"}, + "esbuild": {:hex, :esbuild, "0.10.0", "b0aa3388a1c23e727c5a3e7427c932d89ee791746b0081bbe56103e9ef3d291f", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "468489cda427b974a7cc9f03ace55368a83e1a7be12fba7e30969af78e5f8c70"}, + "expo": {:hex, :expo, "1.1.1", "4202e1d2ca6e2b3b63e02f69cfe0a404f77702b041d02b58597c00992b601db5", [:mix], [], "hexpm", "5fb308b9cb359ae200b7e23d37c76978673aa1b06e2b3075d814ce12c5811640"}, + "file_system": {:hex, :file_system, "1.1.1", "31864f4685b0148f25bd3fbef2b1228457c0c89024ad67f7a81a3ffbc0bbad3a", [:mix], [], "hexpm", "7a15ff97dfe526aeefb090a7a9d3d03aa907e100e262a0f8f7746b78f8f87a5d"}, + "finch": {:hex, :finch, "0.20.0", "5330aefb6b010f424dcbbc4615d914e9e3deae40095e73ab0c1bb0968933cadf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2658131a74d051aabfcba936093c903b8e89da9a1b63e430bee62045fa9b2ee2"}, + "fine": {:hex, :fine, "0.1.4", "b19a89c1476c7c57afb5f9314aed5960b5bc95d5277de4cb5ee8e1d1616ce379", [:mix], [], "hexpm", "be3324cc454a42d80951cf6023b9954e9ff27c6daa255483b3e8d608670303f5"}, + "gettext": {:hex, :gettext, "1.0.2", "5457e1fd3f4abe47b0e13ff85086aabae760497a3497909b8473e0acee57673b", [:mix], [{:expo, "~> 0.5.1 or ~> 1.0", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "eab805501886802071ad290714515c8c4a17196ea76e5afc9d06ca85fb1bfeb3"}, + "heroicons": {:git, 
"https://github.com/tailwindlabs/heroicons.git", "0435d4ca364a608cc75e2f8683d374e55abbae26", [tag: "v2.2.0", sparse: "optimized", depth: 1]}, + "hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"}, + "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, + "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, + "lazy_html": {:hex, :lazy_html, "0.1.8", "677a8642e644eef8de98f3040e2520d42d0f0f8bd6c5cd49db36504e34dffe91", [:make, :mix], [{:cc_precompiler, "~> 0.1", [hex: :cc_precompiler, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.9.0", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:fine, "~> 0.1.0", [hex: :fine, repo: "hexpm", optional: false]}], "hexpm", "0d8167d930b704feb94b41414ca7f5779dff9bca7fcf619fcef18de138f08736"}, + "mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"}, + "mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"}, + "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, + "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"}, + "phoenix": {:hex, :phoenix, "1.8.3", "49ac5e485083cb1495a905e47eb554277bdd9c65ccb4fc5100306b350151aa95", [:mix], [{:bandit, "~> 1.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "36169f95cc2e155b78be93d9590acc3f462f1e5438db06e6248613f27c80caec"}, + "phoenix_ecto": {:hex, :phoenix_ecto, "4.7.0", "75c4b9dfb3efdc42aec2bd5f8bccd978aca0651dbcbc7a3f362ea5d9d43153c6", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, 
repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "1d75011e4254cb4ddf823e81823a9629559a1be93b4321a6a5f11a5306fbf4cc"}, + "phoenix_html": {:hex, :phoenix_html, "4.3.0", "d3577a5df4b6954cd7890c84d955c470b5310bb49647f0a114a6eeecc850f7ad", [:mix], [], "hexpm", "3eaa290a78bab0f075f791a46a981bbe769d94bc776869f4f3063a14f30497ad"}, + "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.7", "405880012cb4b706f26dd1c6349125bfc903fb9e44d1ea668adaf4e04d4884b7", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "3a8625cab39ec261d48a13b7468dc619c0ede099601b084e343968309bd4d7d7"}, + "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.6.2", "b18b0773a1ba77f28c52decbb0f10fd1ac4d3ae5b8632399bbf6986e3b665f62", [:mix], [{:file_system, "~> 0.2.10 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "d1f89c18114c50d394721365ffb428cce24f1c13de0467ffa773e2ff4a30d5b9"}, + "phoenix_live_view": {:hex, :phoenix_live_view, "1.1.19", "c95e9acbc374fb796ee3e24bfecc8213123c74d9f9e45667ca40bb0a4d242953", [:mix], [{:igniter, ">= 0.6.16 and < 1.0.0-0", [hex: :igniter, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:lazy_html, "~> 0.1.0", [hex: :lazy_html, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0 or ~> 1.8.0-rc", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "d5ad357d6b21562a5b431f0ad09dfe76db9ce5648c6949f1aac334c8c4455d32"}, + "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.2.0", "ff3a5616e1bed6804de7773b92cbccfc0b0f473faf1f63d7daf1206c7aeaaa6f", [:mix], [], "hexpm", "adc313a5bf7136039f63cfd9668fde73bba0765e0614cba80c06ac9460ff3e96"}, + "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"}, + "plug": {:hex, :plug, "1.19.1", "09bac17ae7a001a68ae393658aa23c7e38782be5c5c00c80be82901262c394c0", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", 
optional: false]}], "hexpm", "560a0017a8f6d5d30146916862aaf9300b7280063651dd7e532b8be168511e62"}, + "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"}, + "postgrex": {:hex, :postgrex, "0.21.1", "2c5cc830ec11e7a0067dd4d623c049b3ef807e9507a424985b8dcf921224cd88", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "27d8d21c103c3cc68851b533ff99eef353e6a0ff98dc444ea751de43eb48bdac"}, + "req": {:hex, :req, "0.5.16", "99ba6a36b014458e52a8b9a0543bfa752cb0344b2a9d756651db1281d4ba4450", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "974a7a27982b9b791df84e8f6687d21483795882a7840e8309abdbe08bb06f09"}, + "rustler": {:hex, :rustler, "0.37.1", "721434020c7f6f8e1cdc57f44f75c490435b01de96384f8ccb96043f12e8a7e0", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "24547e9b8640cf00e6a2071acb710f3e12ce0346692e45098d84d45cdb54fd79"}, + "stream_data": {:hex, :stream_data, "1.2.0", "58dd3f9e88afe27dc38bef26fce0c84a9e7a96772b2925c7b32cd2435697a52b", [:mix], [], "hexpm", "eb5c546ee3466920314643edf68943a5b14b32d1da9fe01698dc92b73f89a9ed"}, + "swoosh": {:hex, :swoosh, "1.19.9", "4eb2c471b8cf06adbdcaa1d57a0ad53c0ed9348ce8586a06cc491f9f0dbcb553", [:mix], [{:bandit, ">= 1.0.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:cowboy, "~> 1.1 or ~> 2.4", [hex: :cowboy, repo: "hexpm", optional: true]}, {:ex_aws, "~> 2.1", [hex: :ex_aws, repo: "hexpm", optional: true]}, {:finch, "~> 0.6", [hex: :finch, repo: "hexpm", optional: true]}, {:gen_smtp, "~> 0.13 or ~> 1.0", [hex: :gen_smtp, repo: "hexpm", optional: true]}, {:hackney, "~> 1.9", [hex: :hackney, repo: "hexpm", optional: true]}, {:idna, "~> 6.0", [hex: :idna, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mail, "~> 0.2", [hex: :mail, repo: "hexpm", optional: true]}, {:mime, "~> 1.1 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mua, "~> 0.2.3", [hex: :mua, repo: "hexpm", optional: true]}, {:multipart, "~> 0.4", [hex: :multipart, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, ">= 1.0.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:req, "~> 0.5.10 or ~> 0.6 or ~> 1.0", [hex: :req, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "516898263a64925c31723c56bc7999a26e97b04e869707f681f4c9bca7ee1688"}, + "tailwind": {:hex, :tailwind, "0.4.1", "e7bcc222fe96a1e55f948e76d13dd84a1a7653fb051d2a167135db3b4b08d3e9", [:mix], [], "hexpm", "6249d4f9819052911120dbdbe9e532e6bd64ea23476056adb7f730aa25c220d1"}, + "telemetry": {:hex, :telemetry, "1.3.0", 
"fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"}, + "telemetry_poller": {:hex, :telemetry_poller, "1.3.0", "d5c46420126b5ac2d72bc6580fb4f537d35e851cc0f8dbd571acf6d6e10f5ec7", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "51f18bed7128544a50f75897db9974436ea9bfba560420b646af27a9a9b35211"}, + "thousand_island": {:hex, :thousand_island, "1.4.3", "2158209580f633be38d43ec4e3ce0a01079592b9657afff9080d5d8ca149a3af", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "6e4ce09b0fd761a58594d02814d40f77daff460c48a7354a15ab353bb998ea0b"}, + "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.1", "a48703a25c170eedadca83b11e88985af08d35f37c6f664d6dcfb106a97782fc", [:rebar3], [], "hexpm", "b3a917854ce3ae233619744ad1e0102e05673136776fb2fa76234f3e03b23642"}, + "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"}, + "websock_adapter": {:hex, :websock_adapter, "0.5.9", "43dc3ba6d89ef5dec5b1d0a39698436a1e856d000d84bf31a3149862b01a287f", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "5534d5c9adad3c18a0f58a9371220d75a803bf0b9a3d87e6fe072faaeed76a08"}, +} diff --git a/web-ng/native/srql_nif/Cargo.lock b/web-ng/native/srql_nif/Cargo.lock new file mode 100644 index 000000000..14d15f19c --- /dev/null +++ b/web-ng/native/srql_nif/Cargo.lock @@ -0,0 +1,3502 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core 0.4.5", + "axum-macros", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit 0.7.3", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum" +version = "0.8.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" +dependencies = [ + "axum-core 0.5.5", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit 0.8.4", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde_core", + "sync_wrapper", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" + +[[package]] +name = "bb8" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89aabfae550a5c44b43ab941844ffcd2e993cb6900b342debf59e9ea74acdb8" +dependencies = [ + "async-trait", + "futures-util", + "parking_lot", + "tokio", +] + +[[package]] +name = "bb8" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "457d7ed3f888dfd2c7af56d4975cade43c622f74bdcddfed6d4352f57acc6310" +dependencies = [ + "futures-util", + "parking_lot", + "portable-atomic", + "tokio", +] + +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" + +[[package]] +name = "cc" +version = "1.2.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "chrono" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "der_derive", + "flagset", + "zeroize", +] + +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "deranged" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "diesel" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c415189028b232660655e4893e8bc25ca7aee8e96888db66d9edb400535456a" +dependencies = [ + "bitflags", + "byteorder", + "chrono", + "diesel_derives", + "downcast-rs", + "itoa", + "pq-sys", + "serde_json", +] + +[[package]] +name = "diesel-async" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13096fb8dae53f2d411c4b523bec85f45552ed3044a2ab4d85fb2092d9cb4f34" +dependencies = [ + "bb8 0.9.1", + "diesel", + "futures-core", + "futures-util", + "scoped-futures", + "tokio", + "tokio-postgres", +] + +[[package]] +name = "diesel_derives" +version = "2.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8587cbca3c929fb198e7950d761d31ca72b80aa6e07c1b7bec5879d187720436" +dependencies = [ + "diesel_table_macro_syntax", + "dsl_auto_type", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "diesel_table_macro_syntax" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" +dependencies = [ + "syn", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "downcast-rs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc" + +[[package]] +name = "dsl_auto_type" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd122633e4bef06db27737f21d3738fb89c8f6d5360d6d9d7635dda142a7757e" +dependencies = [ + "darling", + "either", + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "envy" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f47e0157f2cb54f5ae1bd371b30a2ae4311e1c028f575cd4e81de7353215965" +dependencies = [ + "serde", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "flagset" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7ac824320a75a52197e8f2d787f6a38b6718bb6897a35142d749af3c0e8f4fe" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-sink", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] + +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.12.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2 0.6.1", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "inventory" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc61209c082fbeb19919bee74b176221b27223e27b65d781eb91af24eb1fb46e" +dependencies = [ + "rustversion", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "js-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "9.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +dependencies = [ + "base64", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "kvutil" +version = "0.1.0" +dependencies = [ + "anyhow", + "pem", + "prost 0.13.5", + "rustls 0.21.12", + "serde", + "serde_json", + "spiffe", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "toml", + "tonic 0.12.3", + "tonic-build 0.12.3", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.178" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags", + "libc", + "redox_syscall", +] + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "logos" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff472f899b4ec2d99161c51f60ff7075eeb3097069a36050d8037a6325eb8154" +dependencies = [ + "logos-derive", +] + +[[package]] +name = "logos-codegen" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "192a3a2b90b0c05b27a0b2c43eecdb7c415e29243acc3f89cc8247a5b693045c" +dependencies = [ + "beef", + "fnv", + "lazy_static", + "proc-macro2", + "quote", + "regex-syntax", + "rustc_version", + "syn", +] + +[[package]] +name = "logos-derive" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "605d9697bcd5ef3a42d38efc51541aa3d6a4a25f7ab6d1ed0da5ac632a26b470" +dependencies = [ + "logos-codegen", +] + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "miette" +version = "7.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f98efec8807c63c752b5bd61f862c165c115b0a35685bdcfd9238c7aeb592b7" +dependencies = [ + "cfg-if", + "miette-derive", + "unicode-width", +] + +[[package]] +name = "miette-derive" +version = "7.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "multimap" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "oid-registry" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" +dependencies = [ + "asn1-rs", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64", + "serde_core", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset", + "indexmap 2.12.1", +] + +[[package]] +name = "phf" +version = "0.13.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_shared", + "serde", +] + +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "postgres-protocol" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbef655056b916eb868048276cfd5d6a7dea4f81560dfd047f97c8c6fe3fcfd4" +dependencies = [ + "base64", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand 0.9.2", + "sha2", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef4605b7c057056dd35baeb6ac0c0338e4975b1f2bef0f65da953285eb007095" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", +] + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "pq-src" +version = "0.3.10+libpq-18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ef39ce621f4993d6084fdcd4cbf1e01c84bdba53109cfad095d2cf441b85b9" +dependencies = [ + "cc", + "openssl-sys", 
+] + +[[package]] +name = "pq-sys" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "574ddd6a267294433f140b02a726b0640c43cf7c6f717084684aaa3b285aba61" +dependencies = [ + "libc", + "pkg-config", + "pq-src", + "vcpkg", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive 0.13.5", +] + +[[package]] +name = "prost" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +dependencies = [ + "bytes", + "prost-derive 0.14.1", +] + +[[package]] +name = "prost-build" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +dependencies = [ + "heck", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.13.5", + "prost-types 0.13.5", + "regex", + "syn", + "tempfile", +] + +[[package]] +name = "prost-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +dependencies = [ + "heck", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.14.1", + "prost-types 0.14.1", + "pulldown-cmark", + "pulldown-cmark-to-cmark", + "regex", + "syn", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-derive" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-reflect" +version = "0.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b89455ef41ed200cafc47c76c552ee7792370ac420497e551f16123a9135f76e" +dependencies = [ + "logos", + "miette", + "prost 0.14.1", + "prost-types 0.14.1", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost 0.13.5", +] + +[[package]] +name = "prost-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +dependencies = [ + "prost 0.14.1", +] + +[[package]] +name = "protox" +version = "0.9.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f25a07a73c6717f0b9bbbd685918f5df9815f7efba450b83d9c9dea41f0e3a1" +dependencies = [ + "bytes", + "miette", + "prost 0.14.1", + "prost-reflect", + "prost-types 0.14.1", + "protox-parse", + "thiserror 2.0.17", +] + +[[package]] +name = "protox-parse" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "072eee358134396a4643dff81cfff1c255c9fbd3fb296be14bdb6a26f9156366" +dependencies = [ + "logos", + "miette", + "prost-types 0.14.1", + "thiserror 2.0.17", +] + +[[package]] +name = "pulldown-cmark" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +dependencies = [ + "bitflags", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark-to-cmark" +version = "21.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" +dependencies = [ + "pulldown-cmark", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = 
"ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-lite" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da" + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustler" +version = "0.36.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3fe55230a9c379733dd38ee67d4072fa5c558b2e22b76b0e7f924390456e003" +dependencies = [ + "inventory", + "libloading", + "regex-lite", + "rustler_codegen", +] + +[[package]] +name = "rustler_codegen" +version = "0.36.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3b8de901ae61418e2036245d28e41ef58080d04f40b68430471ae36a4e84ed" +dependencies = [ + "heck", + "inventory", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + +[[package]] +name = "rustls" +version = "0.23.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki 0.103.8", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "scoped-futures" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b24aae2d0636530f359e9d5ef0c04669d11c5e756699b27a6a6d845d8329091" +dependencies = [ + "pin-project-lite", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = 
"serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.12.1", + "schemars 0.9.0", + "schemars 1.1.0", + "serde_core", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +dependencies = [ + "libc", +] + +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = 
"socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "spiffe" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f805aae79ba04a1f0b2e5e06de0bbb2e7851ea3588d03fdf8f25075a0111603" +dependencies = [ + "anyhow", + "hyper-util", + "jsonwebtoken", + "log", + "pkcs8", + "prost 0.14.1", + "prost-build 0.14.1", + "prost-types 0.14.1", + "protox", + "serde", + "serde_json", + "simple_asn1", + "thiserror 2.0.17", + "time", + "tokio", + "tokio-stream", + "tokio-util", + "tonic 0.14.2", + "tonic-prost", + "tonic-prost-build", + "tower 0.5.2", + "url", + "x509-parser", + "zeroize", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "srql" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum 0.7.9", + "base64", + "bb8 0.8.6", + "chrono", + "diesel", + "diesel-async", + "envy", + "http", + "kvutil", + "once_cell", + "parking_lot", + "pq-sys", + "rustls 0.23.35", + "rustls-pemfile", + "serde", + "serde_json", + "serde_with", + "thiserror 1.0.69", + "tokio", + "tokio-postgres", + "tokio-postgres-rustls", + "tower 0.5.2", + "tower-http", + "tracing", + "tracing-subscriber", + "uuid", +] + +[[package]] +name = "srql_nif" +version = "0.1.0" +dependencies = [ + "anyhow", + "rustler", + "serde_json", + "srql", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + 
"syn", +] + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "time-macros" +version = "0.2.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tls_codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de2e01245e2bb89d6f05801c564fa27624dbd7b1846859876c7dad82e90bf6b" +dependencies = [ + "tls_codec_derive", + "zeroize", +] + +[[package]] +name = "tls_codec_derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2e76690929402faae40aebdda620a2c0e25dd6d3b9afe48867dfd95991f4bd" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + 
+[[package]] +name = "tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.1", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-postgres" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b40d66d9b2cfe04b628173409368e58247e8eddbbd3b0e6c6ba1d09f20f6c9e" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand 0.9.2", + "socket2 0.6.1", + "tokio", + "tokio-util", + "whoami", +] + +[[package]] +name = "tokio-postgres-rustls" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27d684bad428a0f2481f42241f821db42c54e2dc81d8c00db8536c506b0a0144" +dependencies = [ + "const-oid", + "ring", + "rustls 0.23.35", + "tokio", + "tokio-postgres", + "tokio-rustls", + "x509-cert", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls 0.23.35", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap 2.12.1", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum 0.7.9", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost 0.13.5", + "rustls-pemfile", + "socket2 0.5.10", + "tokio", + "tokio-rustls", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +dependencies = [ + "async-trait", + "axum 0.8.7", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "socket2 0.6.1", + "sync_wrapper", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build 0.13.5", + "prost-types 0.13.5", + "quote", + "syn", +] + +[[package]] +name = "tonic-build" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" +dependencies = [ + "prettyplease", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost 0.14.1", + "tonic 0.14.2", +] + +[[package]] +name = "tonic-prost-build" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build 0.14.1", + "prost-types 0.14.1", + "quote", + "syn", + "tempfile", + "tonic-build 0.14.2", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 2.12.1", + "pin-project-lite", + "slab", + "sync_wrapper", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "http", + "http-body", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "unicode-normalization" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-properties" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "uuid" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "serde_core", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + +[[package]] +name = "wasm-bindgen" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.83" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "whoami" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d" +dependencies = [ + "libredox", + "wasite", + "web-sys", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 
0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] 
+name = "winnow" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "x509-cert" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" +dependencies = [ + "const-oid", + "der", + "spki", + "tls_codec", +] + +[[package]] +name = "x509-parser" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + 
"displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/web-ng/native/srql_nif/Cargo.toml b/web-ng/native/srql_nif/Cargo.toml new file mode 100644 index 000000000..d87710fd8 --- /dev/null +++ b/web-ng/native/srql_nif/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "srql_nif" +version = "0.1.0" +edition = "2021" + +[lib] +name = "srql_nif" +crate-type = ["cdylib"] + +[dependencies] +anyhow = "1.0.89" +rustler = "0.36.1" +serde_json = "1.0.132" +srql = { path = "../../../rust/srql" } + +[workspace] diff --git a/web-ng/native/srql_nif/src/lib.rs b/web-ng/native/srql_nif/src/lib.rs new file mode 100644 index 000000000..0e3d2eb5e --- /dev/null +++ b/web-ng/native/srql_nif/src/lib.rs @@ -0,0 +1,49 @@ +use rustler::{Encoder, Env, Term}; + +mod atoms { + rustler::atoms! { + ok, + error + } +} + +#[rustler::nif(schedule = "DirtyCpu")] +fn translate( + env: Env, + srql_query: String, + limit: Option, + cursor: Option, + direction: Option, + mode: Option, +) -> Term { + let direction = match direction.as_deref() { + Some("prev") => srql::QueryDirection::Prev, + _ => srql::QueryDirection::Next, + }; + + let request = srql::QueryRequest { + query: srql_query, + limit, + cursor, + direction, + mode, + }; + + let config = srql::config::AppConfig::embedded("postgres://unused/db".to_string()); + + let response = match srql::query::translate_request(&config, request) { + Ok(response) => response, + Err(err) => return (atoms::error(), err.to_string()).encode(env), + }; + + match serde_json::to_string(&response) { + Ok(json) => (atoms::ok(), json).encode(env), + Err(err) => ( + atoms::error(), + format!("failed to encode SRQL translation: {err}"), + ) + .encode(env), + } +} + +rustler::init!("Elixir.ServiceRadarWebNG.SRQL.Native"); diff --git a/web-ng/priv/gettext/en/LC_MESSAGES/errors.po b/web-ng/priv/gettext/en/LC_MESSAGES/errors.po new file mode 100644 index 000000000..844c4f5ce --- /dev/null +++ b/web-ng/priv/gettext/en/LC_MESSAGES/errors.po @@ -0,0 +1,112 @@ +## `msgid`s in this file come from POT (.pot) files. +## +## Do not add, change, or remove `msgid`s manually here as +## they're tied to the ones in the corresponding POT file +## (with the same domain). +## +## Use `mix gettext.extract --merge` or `mix gettext.merge` +## to merge POT files into PO files. 
+msgid "" +msgstr "" +"Language: en\n" + +## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} byte(s)" +msgid_plural "should be %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} byte(s)" +msgid_plural "should be at least %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} byte(s)" +msgid_plural "should be at most %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/web-ng/priv/gettext/errors.pot b/web-ng/priv/gettext/errors.pot new file mode 100644 index 000000000..eef2de2ba --- /dev/null +++ b/web-ng/priv/gettext/errors.pot @@ -0,0 +1,109 @@ +## This is a PO Template file. +## +## `msgid`s here are often extracted from source code. +## Add new translations manually only if they're dynamic +## translations that can't be statically extracted. +## +## Run `mix gettext.extract` to bring this file up to +## date. Leave `msgstr`s empty as changing them here has no +## effect: edit them in PO (`.po`) files instead. 
+## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} byte(s)" +msgid_plural "should be %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} byte(s)" +msgid_plural "should be at least %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} byte(s)" +msgid_plural "should be at most %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/web-ng/priv/repo/migrations/.formatter.exs b/web-ng/priv/repo/migrations/.formatter.exs new file mode 100644 index 000000000..49f9151ed --- /dev/null +++ b/web-ng/priv/repo/migrations/.formatter.exs @@ -0,0 +1,4 @@ +[ + import_deps: [:ecto_sql], + inputs: ["*.exs"] +] diff --git a/web-ng/priv/repo/migrations/20251215020449_create_ng_users_auth_tables.exs b/web-ng/priv/repo/migrations/20251215020449_create_ng_users_auth_tables.exs new file mode 100644 index 000000000..a40c25135 --- /dev/null +++ b/web-ng/priv/repo/migrations/20251215020449_create_ng_users_auth_tables.exs @@ -0,0 +1,28 @@ +defmodule ServiceRadarWebNG.Repo.Migrations.CreateNgUsersAuthTables do + use Ecto.Migration + + def change do + create table(:ng_users) do + add :email, :string, null: false + add :hashed_password, :string + add :confirmed_at, :utc_datetime + + timestamps(type: :utc_datetime) + end + + create unique_index(:ng_users, [:email]) + + create table(:ng_users_tokens) do + add :user_id, references(:ng_users, on_delete: :delete_all), null: false + add :token, :binary, null: false + add :context, :string, null: false + add :sent_to, :string + add :authenticated_at, :utc_datetime + + timestamps(type: :utc_datetime, updated_at: false) + end + + 
create index(:ng_users_tokens, [:user_id]) + create unique_index(:ng_users_tokens, [:context, :token]) + end +end diff --git a/web-ng/priv/repo/seeds.exs b/web-ng/priv/repo/seeds.exs new file mode 100644 index 000000000..cba2cccc1 --- /dev/null +++ b/web-ng/priv/repo/seeds.exs @@ -0,0 +1,11 @@ +# Script for populating the database. You can run it as: +# +# mix run priv/repo/seeds.exs +# +# Inside the script, you can read and write to any of your +# repositories directly: +# +# ServiceRadarWebNG.Repo.insert!(%ServiceRadarWebNG.SomeSchema{}) +# +# We recommend using the bang functions (`insert!`, `update!` +# and so on) as they will fail if something goes wrong. diff --git a/web-ng/priv/static/favicon.ico b/web-ng/priv/static/favicon.ico new file mode 100644 index 000000000..7f372bfc2 Binary files /dev/null and b/web-ng/priv/static/favicon.ico differ diff --git a/web-ng/priv/static/images/logo.svg b/web-ng/priv/static/images/logo.svg new file mode 100644 index 000000000..3bd7056c8 --- /dev/null +++ b/web-ng/priv/static/images/logo.svg @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/web-ng/priv/static/robots.txt b/web-ng/priv/static/robots.txt new file mode 100644 index 000000000..26e06b5f1 --- /dev/null +++ b/web-ng/priv/static/robots.txt @@ -0,0 +1,5 @@ +# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file +# +# To ban all spiders from the entire site uncomment the next two lines: +# User-agent: * +# Disallow: / diff --git a/web-ng/test/integration/graph_cypher_integration_test.exs b/web-ng/test/integration/graph_cypher_integration_test.exs new file mode 100644 index 000000000..815e2efa1 --- /dev/null +++ b/web-ng/test/integration/graph_cypher_integration_test.exs @@ -0,0 +1,153 @@ +if System.get_env("SRQL_INTEGRATION") != "1" do + defmodule ServiceRadarWebNG.GraphCypherIntegrationTest do + use ExUnit.Case, async: true + + @moduletag :skip + + test "set SRQL_INTEGRATION=1 to enable" do + assert true + end + end +else + defmodule ServiceRadarWebNG.GraphCypherIntegrationTest do + use ExUnit.Case, async: false + + alias ServiceRadarWebNGWeb.Dashboard.Plugins.Topology + + @graph_name "serviceradar" + + defmodule PostgrexHelpers do + @moduledoc false + + def connection_opts do + config = ServiceRadarWebNG.Repo.config() + + ssl = + case Keyword.get(config, :ssl, false) do + false -> false + true -> true + ssl_opts when is_list(ssl_opts) -> {:opts, ssl_opts} + end + + base = [ + hostname: Keyword.get(config, :hostname, "localhost"), + username: Keyword.get(config, :username, "postgres"), + password: Keyword.get(config, :password), + database: Keyword.get(config, :database), + port: Keyword.get(config, :port, 5432) + ] + + case ssl do + false -> base + true -> Keyword.put(base, :ssl, true) + {:opts, ssl_opts} -> base |> Keyword.put(:ssl, true) |> Keyword.put(:ssl_opts, ssl_opts) + end + end + + def age_available?(conn) do + with {:ok, %Postgrex.Result{rows: [[_]]}} <- + Postgrex.query(conn, "SELECT 1 FROM pg_namespace WHERE nspname = 'ag_catalog'", []), + {:ok, %Postgrex.Result{rows: [[_]]}} <- + Postgrex.query( + conn, + """ + SELECT 1 + FROM pg_proc p + JOIN pg_namespace n ON n.oid = p.pronamespace + WHERE n.nspname = 'ag_catalog' AND p.proname = 'cypher' + LIMIT 1 + """, + [] + ) do + true + else + _ -> false + end + rescue + _ -> false + end + + def ensure_graph(conn, graph_name) do + existing = + Postgrex.query( + conn, + "SELECT 1 FROM ag_catalog.ag_graph WHERE name = $1 LIMIT 1", + [graph_name] + ) + + case 
existing do + {:ok, %Postgrex.Result{num_rows: 1}} -> + :ok + + {:ok, _} -> + case Postgrex.query(conn, "SELECT ag_catalog.create_graph($1)", [graph_name]) do + {:ok, _} -> :ok + {:error, err} -> {:error, err} + end + + {:error, err} -> + {:error, err} + end + rescue + err -> {:error, err} + end + end + + setup_all do + owner = Ecto.Adapters.SQL.Sandbox.start_owner!(ServiceRadarWebNG.Repo, shared: true) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(owner) end) + + conn = start_supervised!({Postgrex, PostgrexHelpers.connection_opts()}) + + if not PostgrexHelpers.age_available?(conn) do + {:skip, "Apache AGE is not available (ag_catalog.cypher missing)"} + else + case PostgrexHelpers.ensure_graph(conn, @graph_name) do + :ok -> + :ok + + {:error, err} -> + {:skip, "Apache AGE graph #{@graph_name} not available: #{inspect(err)}"} + end + end + end + + test "graph_cypher passthrough supports explicit nodes/edges payload" do + query = ~s(in:graph_cypher cypher:"RETURN {nodes: [], edges: []} AS result" limit:5) + assert {:ok, %{"results" => [payload]}} = ServiceRadarWebNG.SRQL.query(query) + + assert is_map(payload) + assert Map.get(payload, "nodes") == [] + assert Map.get(payload, "edges") == [] + assert Topology.supports?(%{"results" => [payload]}) + end + + test "graph_cypher wraps node rows into nodes/edges" do + query = ~s(in:graph_cypher cypher:"RETURN {id: 'n1', label: 'Node'} AS result" limit:5) + assert {:ok, %{"results" => [payload]}} = ServiceRadarWebNG.SRQL.query(query) + + assert is_map(payload) + assert [%{"id" => "n1"} | _] = Map.get(payload, "nodes") + assert Map.get(payload, "edges") == [] + assert Topology.supports?(%{"results" => [payload]}) + end + + test "graph_cypher wraps edge rows into nodes/edges" do + query = + ~s(in:graph_cypher cypher:"RETURN {start_id: 'n1', end_id: 'n2', type: 'links_to'} AS result" limit:5) + + assert {:ok, %{"results" => [payload]}} = ServiceRadarWebNG.SRQL.query(query) + + assert is_map(payload) + nodes = Map.get(payload, "nodes") + edges = Map.get(payload, "edges") + + assert is_list(nodes) + assert is_list(edges) + assert Enum.any?(nodes, &(&1["id"] == "n1")) + assert Enum.any?(nodes, &(&1["id"] == "n2")) + assert [%{"start_id" => "n1", "end_id" => "n2"} | _] = edges + assert Topology.supports?(%{"results" => [payload]}) + end + end +end diff --git a/web-ng/test/integration/srql_nif_integration_test.exs b/web-ng/test/integration/srql_nif_integration_test.exs new file mode 100644 index 000000000..960e36a88 --- /dev/null +++ b/web-ng/test/integration/srql_nif_integration_test.exs @@ -0,0 +1,193 @@ +if System.get_env("SRQL_INTEGRATION") != "1" do + defmodule ServiceRadarWebNG.SRQLNifIntegrationTest do + use ExUnit.Case, async: true + + @moduletag :skip + + test "set SRQL_INTEGRATION=1 to enable" do + assert true + end + end +else + defmodule ServiceRadarWebNG.SRQLNifIntegrationTest do + use ExUnit.Case, async: false + + defmodule PostgrexHelpers do + @moduledoc false + + def connection_opts do + config = ServiceRadarWebNG.Repo.config() + + ssl = + case Keyword.get(config, :ssl, false) do + false -> false + true -> true + ssl_opts when is_list(ssl_opts) -> {:opts, ssl_opts} + end + + base = [ + hostname: Keyword.get(config, :hostname, "localhost"), + username: Keyword.get(config, :username, "postgres"), + password: Keyword.get(config, :password), + database: Keyword.get(config, :database), + port: Keyword.get(config, :port, 5432) + ] + + case ssl do + false -> base + true -> Keyword.put(base, :ssl, true) + {:opts, ssl_opts} -> base |> 
Keyword.put(:ssl, true) |> Keyword.put(:ssl_opts, ssl_opts) + end + end + + def create_pollers_table(conn) do + sql = """ + CREATE TABLE IF NOT EXISTS pollers ( + poller_id text PRIMARY KEY, + component_id text NULL, + registration_source text NULL, + status text NULL, + spiffe_identity text NULL, + first_registered timestamptz NULL, + first_seen timestamptz NULL, + last_seen timestamptz NULL, + metadata jsonb NULL, + created_by text NULL, + is_healthy boolean NULL, + agent_count int4 NULL, + checker_count int4 NULL, + updated_at timestamptz NULL + ) + """ + + Postgrex.query!(conn, sql, []) + end + + def insert_poller(conn, poller_id) do + now = DateTime.utc_now() + + sql = """ + INSERT INTO pollers ( + poller_id, + component_id, + registration_source, + status, + spiffe_identity, + first_registered, + first_seen, + last_seen, + metadata, + created_by, + is_healthy, + agent_count, + checker_count, + updated_at + ) VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + $8, + $9, + $10, + $11, + $12, + $13, + $14 + ) + ON CONFLICT (poller_id) DO UPDATE SET + component_id = excluded.component_id, + registration_source = excluded.registration_source, + status = excluded.status, + spiffe_identity = excluded.spiffe_identity, + first_registered = excluded.first_registered, + first_seen = excluded.first_seen, + last_seen = excluded.last_seen, + metadata = excluded.metadata, + created_by = excluded.created_by, + is_healthy = excluded.is_healthy, + agent_count = excluded.agent_count, + checker_count = excluded.checker_count, + updated_at = excluded.updated_at + """ + + Postgrex.query!(conn, sql, [ + poller_id, + "srql-itest", + "integration", + "ready", + "spiffe://example.test/poller", + now, + now, + now, + %{"integration" => true}, + "srql-nif-test", + true, + 7, + 3, + now + ]) + end + + def delete_poller(conn, poller_id) do + Postgrex.query!(conn, "DELETE FROM pollers WHERE poller_id = $1", [poller_id]) + end + end + + setup_all do + owner = + Ecto.Adapters.SQL.Sandbox.start_owner!(ServiceRadarWebNG.Repo, shared: true) + + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(owner) end) + + conn = start_supervised!({Postgrex, PostgrexHelpers.connection_opts()}) + PostgrexHelpers.create_pollers_table(conn) + + poller_id = "srql-itest-" <> Ecto.UUID.generate() + PostgrexHelpers.insert_poller(conn, poller_id) + + on_exit(fn -> + if Process.alive?(conn) do + try do + PostgrexHelpers.delete_poller(conn, poller_id) + catch + :exit, _ -> :ok + end + end + end) + + {:ok, poller_id: poller_id} + end + + test "translates SRQL pollers query via NIF", %{poller_id: poller_id} do + query = "in:pollers poller_id:#{poller_id} is_healthy:true limit:5" + + assert {:ok, json} = + ServiceRadarWebNG.SRQL.Native.translate(query, nil, nil, nil, nil) + + assert {:ok, %{"sql" => sql, "params" => params}} = Jason.decode(json) + assert is_binary(sql) + assert is_list(params) + end + + test "executes SRQL pollers query via NIF", %{poller_id: poller_id} do + query = "in:pollers poller_id:#{poller_id} is_healthy:true limit:5" + + assert {:ok, response} = ServiceRadarWebNG.SRQL.query(query) + assert is_map(response) + + results = Map.get(response, "results") + assert is_list(results) + assert length(results) == 1 + + poller = hd(results) + assert poller["poller_id"] == poller_id + assert poller["is_healthy"] == true + assert poller["agent_count"] == 7 + assert poller["checker_count"] == 3 + end + end +end diff --git a/web-ng/test/property/api_device_controller_property_test.exs 
b/web-ng/test/property/api_device_controller_property_test.exs new file mode 100644 index 000000000..09f983164 --- /dev/null +++ b/web-ng/test/property/api_device_controller_property_test.exs @@ -0,0 +1,43 @@ +defmodule ServiceRadarWebNG.ApiDeviceControllerPropertyTest do + use ServiceRadarWebNG.DataCase, async: true + use ExUnitProperties + + alias ServiceRadarWebNG.Api.DeviceController + alias ServiceRadarWebNG.Generators.SRQLGenerators + alias ServiceRadarWebNG.TestSupport.PropertyOpts + + defp devices_index_params do + StreamData.fixed_map(%{ + "limit" => SRQLGenerators.untrusted_param_value(), + "offset" => SRQLGenerators.untrusted_param_value(), + "page" => SRQLGenerators.untrusted_param_value(), + "search" => SRQLGenerators.untrusted_param_value(), + "status" => SRQLGenerators.untrusted_param_value(), + "poller_id" => SRQLGenerators.untrusted_param_value(), + "device_type" => SRQLGenerators.untrusted_param_value() + }) + end + + property "DeviceController.index/2 never crashes for untrusted query params" do + check all( + params <- devices_index_params(), + extra <- SRQLGenerators.json_map(max_length: 4), + max_runs: PropertyOpts.max_runs() + ) do + conn = Plug.Test.conn("GET", "/api/devices") + conn = DeviceController.index(conn, Map.merge(extra, params)) + assert conn.status in [200, 400] + end + end + + property "DeviceController.show/2 never crashes for untrusted device_id" do + check all( + device_id <- SRQLGenerators.untrusted_param_value(), + max_runs: PropertyOpts.max_runs() + ) do + conn = Plug.Test.conn("GET", "/api/devices/any") + conn = DeviceController.show(conn, %{"device_id" => device_id}) + assert conn.status in [200, 400, 404] + end + end +end diff --git a/web-ng/test/property/api_query_controller_property_test.exs b/web-ng/test/property/api_query_controller_property_test.exs new file mode 100644 index 000000000..e932b2caa --- /dev/null +++ b/web-ng/test/property/api_query_controller_property_test.exs @@ -0,0 +1,52 @@ +defmodule ServiceRadarWebNG.ApiQueryControllerPropertyTest do + use ExUnit.Case, async: true + use ExUnitProperties + + alias ServiceRadarWebNG.Generators.SRQLGenerators + alias ServiceRadarWebNG.TestSupport.PropertyOpts + alias ServiceRadarWebNG.TestSupport.SRQLStub + + setup do + old = Application.get_env(:serviceradar_web_ng, :srql_module) + Application.put_env(:serviceradar_web_ng, :srql_module, SRQLStub) + + on_exit(fn -> + if is_nil(old) do + Application.delete_env(:serviceradar_web_ng, :srql_module) + else + Application.put_env(:serviceradar_web_ng, :srql_module, old) + end + end) + + :ok + end + + property "QueryController.execute/2 never crashes for JSON-like maps" do + check all( + payload <- SRQLGenerators.json_map(), + max_runs: PropertyOpts.max_runs() + ) do + conn = Plug.Test.conn("POST", "/api/query") + conn = ServiceRadarWebNG.Api.QueryController.execute(conn, payload) + assert conn.status in [200, 400] + end + end + + property "JSON parser does not crash on malformed JSON bodies" do + invalid_json = + SRQLGenerators.json_map(max_length: 8) + |> StreamData.map(fn payload -> Jason.encode!(payload) <> "x" end) + + check all( + body <- invalid_json, + max_runs: PropertyOpts.max_runs(:slow_property) + ) do + conn = + Plug.Test.conn("POST", "/api/query", body) + |> Plug.Conn.put_req_header("content-type", "application/json") + |> ServiceRadarWebNGWeb.Endpoint.call([]) + + assert conn.status == 400 + end + end +end diff --git a/web-ng/test/property/edge_onboarding_token_property_test.exs 
b/web-ng/test/property/edge_onboarding_token_property_test.exs new file mode 100644 index 000000000..de913e2ef --- /dev/null +++ b/web-ng/test/property/edge_onboarding_token_property_test.exs @@ -0,0 +1,46 @@ +defmodule ServiceRadarWebNG.EdgeOnboardingTokenPropertyTest do + use ExUnit.Case, async: true + use ExUnitProperties + + alias ServiceRadarWebNG.Edge.OnboardingToken + alias ServiceRadarWebNG.Generators.EdgeOnboardingGenerators + alias ServiceRadarWebNG.TestSupport.PropertyOpts + + property "edge onboarding tokens round-trip and are base64url (no padding)" do + check all( + package_id <- EdgeOnboardingGenerators.package_id(), + download_token <- EdgeOnboardingGenerators.download_token(), + api <- EdgeOnboardingGenerators.core_api_url(), + max_runs: PropertyOpts.max_runs() + ) do + assert {:ok, token} = OnboardingToken.encode(package_id, download_token, api) + assert String.starts_with?(token, "edgepkg-v1:") + assert token =~ ~r/^edgepkg-v1:[A-Za-z0-9_-]+$/ + + assert {:ok, payload} = OnboardingToken.decode(token) + + expected = + %{pkg: package_id, dl: download_token} + |> maybe_put_api(api) + + assert payload == expected + end + end + + property "edge onboarding token decode never crashes for random strings" do + check all( + raw <- EdgeOnboardingGenerators.random_token_string(), + max_runs: PropertyOpts.max_runs(:slow_property) + ) do + result = OnboardingToken.decode(raw) + assert match?({:ok, _}, result) or match?({:error, _}, result) + end + end + + defp maybe_put_api(payload, nil), do: payload + + defp maybe_put_api(payload, api) when is_binary(api) do + api = String.trim(api) + if api == "", do: payload, else: Map.put(payload, :api, api) + end +end diff --git a/web-ng/test/property/srql_property_test.exs b/web-ng/test/property/srql_property_test.exs new file mode 100644 index 000000000..9d67901a1 --- /dev/null +++ b/web-ng/test/property/srql_property_test.exs @@ -0,0 +1,27 @@ +defmodule ServiceRadarWebNG.SRQLPropertyTest do + use ExUnit.Case, async: false + use ExUnitProperties + + alias ServiceRadarWebNG.Generators.SRQLGenerators + alias ServiceRadarWebNG.TestSupport.PropertyOpts + + property "SRQL.query/1 never crashes for printable strings" do + check all( + query <- SRQLGenerators.printable_query_string(), + max_runs: PropertyOpts.max_runs() + ) do + result = ServiceRadarWebNG.SRQL.query(query) + assert match?({:ok, _}, result) or match?({:error, _}, result) + end + end + + property "SRQL.query_request/1 never crashes for JSON-like maps" do + check all( + payload <- SRQLGenerators.json_map(), + max_runs: PropertyOpts.max_runs() + ) do + result = ServiceRadarWebNG.SRQL.query_request(payload) + assert match?({:ok, _}, result) or match?({:error, _}, result) + end + end +end diff --git a/web-ng/test/property/srql_query_input_property_test.exs b/web-ng/test/property/srql_query_input_property_test.exs new file mode 100644 index 000000000..3602b4916 --- /dev/null +++ b/web-ng/test/property/srql_query_input_property_test.exs @@ -0,0 +1,90 @@ +defmodule ServiceRadarWebNGWeb.SRQLQueryInputPropertyTest do + use ExUnit.Case, async: false + use ExUnitProperties + + alias ServiceRadarWebNG.Generators.SRQLGenerators + alias ServiceRadarWebNG.TestSupport.PropertyOpts + alias ServiceRadarWebNG.TestSupport.SRQLStub + alias ServiceRadarWebNGWeb.DashboardLive.Index, as: DashboardLive + alias ServiceRadarWebNGWeb.SRQL.Page, as: SRQLPage + + setup do + old = Application.get_env(:serviceradar_web_ng, :srql_module) + Application.put_env(:serviceradar_web_ng, :srql_module, SRQLStub) + + on_exit(fn 
-> + if is_nil(old) do + Application.delete_env(:serviceradar_web_ng, :srql_module) + else + Application.put_env(:serviceradar_web_ng, :srql_module, old) + end + end) + + :ok + end + + property "SRQL.Page.load_list/5 never crashes for malformed query params" do + check all( + q <- SRQLGenerators.untrusted_param_value(), + limit <- SRQLGenerators.untrusted_param_value(), + extra <- SRQLGenerators.json_map(max_length: 4), + uri <- StreamData.string(:printable, max_length: 160), + max_runs: PropertyOpts.max_runs() + ) do + socket = + %Phoenix.LiveView.Socket{} + |> SRQLPage.init("devices", builder_available: true) + + params = Map.merge(extra, %{"q" => q, "limit" => limit}) + + _socket = + SRQLPage.load_list(socket, params, uri, :results, default_limit: 100, max_limit: 500) + end + end + + property "DashboardLive.handle_params/3 never crashes for malformed query params" do + check all( + q <- SRQLGenerators.untrusted_param_value(), + limit <- SRQLGenerators.untrusted_param_value(), + extra <- SRQLGenerators.json_map(max_length: 4), + uri <- StreamData.string(:printable, max_length: 160), + max_runs: PropertyOpts.max_runs() + ) do + {:ok, socket} = DashboardLive.mount(%{}, %{}, %Phoenix.LiveView.Socket{}) + params = Map.merge(extra, %{"q" => q, "limit" => limit}) + assert {:noreply, _socket} = DashboardLive.handle_params(params, uri, socket) + end + end + + property "DashboardLive handle_event callbacks never crash for malformed params" do + check all( + q <- SRQLGenerators.untrusted_param_value(), + builder <- SRQLGenerators.untrusted_param_value(), + idx <- SRQLGenerators.untrusted_param_value(), + max_runs: PropertyOpts.max_runs() + ) do + {:ok, socket} = DashboardLive.mount(%{}, %{}, %Phoenix.LiveView.Socket{}) + + assert {:noreply, socket} = DashboardLive.handle_event("srql_change", %{"q" => q}, socket) + + assert {:noreply, socket} = + DashboardLive.handle_event("srql_builder_toggle", %{"q" => q}, socket) + + assert {:noreply, socket} = + DashboardLive.handle_event("srql_builder_change", %{"builder" => builder}, socket) + + assert {:noreply, socket} = + DashboardLive.handle_event( + "srql_builder_add_filter", + %{"builder" => builder}, + socket + ) + + assert {:noreply, socket} = + DashboardLive.handle_event("srql_builder_remove_filter", %{"idx" => idx}, socket) + + assert {:noreply, _socket} = + DashboardLive.handle_event("srql_builder_apply", %{"builder" => builder}, socket) + end + end +end diff --git a/web-ng/test/serviceradar_web_ng/accounts_test.exs b/web-ng/test/serviceradar_web_ng/accounts_test.exs new file mode 100644 index 000000000..754e7061e --- /dev/null +++ b/web-ng/test/serviceradar_web_ng/accounts_test.exs @@ -0,0 +1,421 @@ +defmodule ServiceRadarWebNG.AccountsTest do + use ServiceRadarWebNG.DataCase + + alias ServiceRadarWebNG.Accounts + + import Ecto.Query, only: [from: 2] + import ServiceRadarWebNG.AccountsFixtures + alias ServiceRadarWebNG.Accounts.{User, UserToken} + + describe "get_user_by_email/1" do + test "does not return the user if the email does not exist" do + refute Accounts.get_user_by_email("unknown@example.com") + end + + test "returns the user if the email exists" do + %{id: id} = user = user_fixture() + assert %User{id: ^id} = Accounts.get_user_by_email(user.email) + end + end + + describe "get_user_by_email_and_password/2" do + test "does not return the user if the email does not exist" do + refute Accounts.get_user_by_email_and_password("unknown@example.com", "hello world!") + end + + test "does not return the user if the password is not valid" do + 
user = user_fixture() |> set_password() + refute Accounts.get_user_by_email_and_password(user.email, "invalid") + end + + test "returns the user if the email and password are valid" do + %{id: id} = user = user_fixture() |> set_password() + + assert %User{id: ^id} = + Accounts.get_user_by_email_and_password(user.email, valid_user_password()) + end + end + + describe "get_user!/1" do + test "raises if id is invalid" do + assert_raise Ecto.NoResultsError, fn -> + Accounts.get_user!(-1) + end + end + + test "returns the user with the given id" do + %{id: id} = user = user_fixture() + assert %User{id: ^id} = Accounts.get_user!(user.id) + end + end + + describe "register_user/1" do + test "requires email to be set" do + {:error, changeset} = Accounts.register_user(%{}) + + assert %{email: ["can't be blank"]} = errors_on(changeset) + end + + test "validates email when given" do + {:error, changeset} = Accounts.register_user(%{email: "not valid"}) + + assert %{email: ["must have the @ sign and no spaces"]} = errors_on(changeset) + end + + test "validates maximum values for email for security" do + too_long = String.duplicate("db", 100) + {:error, changeset} = Accounts.register_user(%{email: too_long}) + assert "should be at most 160 character(s)" in errors_on(changeset).email + end + + test "validates email uniqueness" do + %{email: email} = user_fixture() + {:error, changeset} = Accounts.register_user(%{email: email}) + assert "has already been taken" in errors_on(changeset).email + + # Now try with the uppercased email too, to check that email case is ignored. + {:error, changeset} = Accounts.register_user(%{email: String.upcase(email)}) + assert "has already been taken" in errors_on(changeset).email + end + + test "registers users without password" do + email = unique_user_email() + {:ok, user} = Accounts.register_user(valid_user_attributes(email: email)) + assert user.email == email + assert is_nil(user.hashed_password) + assert is_nil(user.confirmed_at) + assert is_nil(user.password) + end + end + + describe "sudo_mode?/2" do + test "validates the authenticated_at time" do + now = DateTime.utc_now() + + assert Accounts.sudo_mode?(%User{authenticated_at: DateTime.utc_now()}) + assert Accounts.sudo_mode?(%User{authenticated_at: DateTime.add(now, -19, :minute)}) + refute Accounts.sudo_mode?(%User{authenticated_at: DateTime.add(now, -21, :minute)}) + + # minute override + refute Accounts.sudo_mode?( + %User{authenticated_at: DateTime.add(now, -11, :minute)}, + -10 + ) + + # not authenticated + refute Accounts.sudo_mode?(%User{}) + end + end + + describe "change_user_email/3" do + test "returns a user changeset" do + assert %Ecto.Changeset{} = changeset = Accounts.change_user_email(%User{}) + assert changeset.required == [:email] + end + end + + describe "deliver_user_update_email_instructions/3" do + setup do + %{user: user_fixture()} + end + + test "sends token through notification", %{user: user} do + token = + extract_user_token(fn url -> + Accounts.deliver_user_update_email_instructions(user, "current@example.com", url) + end) + + {:ok, token} = Base.url_decode64(token, padding: false) + assert user_token = Repo.get_by(UserToken, token: :crypto.hash(:sha256, token)) + assert user_token.user_id == user.id + assert user_token.sent_to == user.email + assert user_token.context == "change:current@example.com" + end + end + + describe "update_user_email/2" do + setup do + user = unconfirmed_user_fixture() + email = unique_user_email() + + token = + extract_user_token(fn url -> + 
Accounts.deliver_user_update_email_instructions(%{user | email: email}, user.email, url) + end) + + %{user: user, token: token, email: email} + end + + test "updates the email with a valid token", %{user: user, token: token, email: email} do + assert {:ok, %{email: ^email}} = Accounts.update_user_email(user, token) + changed_user = Repo.get!(User, user.id) + assert changed_user.email != user.email + assert changed_user.email == email + refute Repo.get_by(UserToken, user_id: user.id) + end + + test "does not update email with invalid token", %{user: user} do + assert Accounts.update_user_email(user, "oops") == + {:error, :transaction_aborted} + + assert Repo.get!(User, user.id).email == user.email + assert Repo.get_by(UserToken, user_id: user.id) + end + + test "does not update email if user email changed", %{user: user, token: token} do + assert Accounts.update_user_email(%{user | email: "current@example.com"}, token) == + {:error, :transaction_aborted} + + assert Repo.get!(User, user.id).email == user.email + assert Repo.get_by(UserToken, user_id: user.id) + end + + test "does not update email if token expired", %{user: user, token: token} do + {count, nil} = + Repo.update_all( + from(ut in UserToken, where: ut.user_id == ^user.id), + set: [inserted_at: ~N[2020-01-01 00:00:00]] + ) + + assert count >= 1 + + assert Accounts.update_user_email(user, token) == + {:error, :transaction_aborted} + + assert Repo.get!(User, user.id).email == user.email + assert Repo.get_by(UserToken, user_id: user.id) + end + end + + describe "change_user_password/3" do + test "returns a user changeset" do + assert %Ecto.Changeset{} = changeset = Accounts.change_user_password(%User{}) + assert changeset.required == [:password] + end + + test "allows fields to be set" do + changeset = + Accounts.change_user_password( + %User{}, + %{ + "password" => "new valid password" + }, + hash_password: false + ) + + assert changeset.valid? 
+ assert get_change(changeset, :password) == "new valid password" + assert is_nil(get_change(changeset, :hashed_password)) + end + end + + describe "update_user_password/2" do + setup do + %{user: user_fixture()} + end + + test "validates password", %{user: user} do + {:error, changeset} = + Accounts.update_user_password(user, %{ + password: "not valid", + password_confirmation: "another" + }) + + assert %{ + password: ["should be at least 12 character(s)"], + password_confirmation: ["does not match password"] + } = errors_on(changeset) + end + + test "validates maximum values for password for security", %{user: user} do + too_long = String.duplicate("db", 100) + + {:error, changeset} = + Accounts.update_user_password(user, %{password: too_long}) + + assert "should be at most 72 character(s)" in errors_on(changeset).password + end + + test "updates the password", %{user: user} do + {:ok, {user, expired_tokens}} = + Accounts.update_user_password(user, %{ + password: "new valid password" + }) + + assert expired_tokens == [] + assert is_nil(user.password) + assert Accounts.get_user_by_email_and_password(user.email, "new valid password") + end + + test "deletes all tokens for the given user", %{user: user} do + _ = Accounts.generate_user_session_token(user) + + {:ok, {_, _}} = + Accounts.update_user_password(user, %{ + password: "new valid password" + }) + + refute Repo.get_by(UserToken, user_id: user.id) + end + end + + describe "generate_user_session_token/1" do + setup do + %{user: user_fixture()} + end + + test "generates a token", %{user: user} do + token = Accounts.generate_user_session_token(user) + assert user_token = Repo.get_by(UserToken, token: token) + assert user_token.context == "session" + assert user_token.authenticated_at != nil + + # Creating the same token for another user should fail + assert_raise Ecto.ConstraintError, fn -> + Repo.insert!(%UserToken{ + token: user_token.token, + user_id: user_fixture().id, + context: "session" + }) + end + end + + test "duplicates the authenticated_at of given user in new token", %{user: user} do + user = %{user | authenticated_at: DateTime.add(DateTime.utc_now(:second), -3600)} + token = Accounts.generate_user_session_token(user) + assert user_token = Repo.get_by(UserToken, token: token) + assert user_token.authenticated_at == user.authenticated_at + assert DateTime.compare(user_token.inserted_at, user.authenticated_at) == :gt + end + end + + describe "get_user_by_session_token/1" do + setup do + user = user_fixture() + token = Accounts.generate_user_session_token(user) + %{user: user, token: token} + end + + test "returns user by token", %{user: user, token: token} do + assert {session_user, token_inserted_at} = Accounts.get_user_by_session_token(token) + assert session_user.id == user.id + assert session_user.authenticated_at != nil + assert token_inserted_at != nil + end + + test "does not return user for invalid token" do + refute Accounts.get_user_by_session_token("oops") + end + + test "does not return user for expired token", %{user: user, token: token} do + dt = ~N[2020-01-01 00:00:00] + + {count, nil} = + Repo.update_all(from(ut in UserToken, where: ut.user_id == ^user.id), + set: [inserted_at: dt, authenticated_at: dt] + ) + + assert count >= 1 + refute Accounts.get_user_by_session_token(token) + end + end + + describe "get_user_by_magic_link_token/1" do + setup do + user = user_fixture() + {encoded_token, _hashed_token} = generate_user_magic_link_token(user) + %{user: user, token: encoded_token} + end + + test "returns user by 
token", %{user: user, token: token} do + assert session_user = Accounts.get_user_by_magic_link_token(token) + assert session_user.id == user.id + end + + test "does not return user for invalid token" do + refute Accounts.get_user_by_magic_link_token("oops") + end + + test "does not return user for expired token", %{user: user, token: token} do + {count, nil} = + Repo.update_all(from(ut in UserToken, where: ut.user_id == ^user.id), + set: [inserted_at: ~N[2020-01-01 00:00:00]] + ) + + assert count >= 1 + refute Accounts.get_user_by_magic_link_token(token) + end + end + + describe "login_user_by_magic_link/1" do + test "confirms user and expires tokens" do + user = unconfirmed_user_fixture() + refute user.confirmed_at + {encoded_token, hashed_token} = generate_user_magic_link_token(user) + + assert {:ok, {user, [%{token: ^hashed_token}]}} = + Accounts.login_user_by_magic_link(encoded_token) + + assert user.confirmed_at + end + + test "returns user and (deleted) token for confirmed user" do + user = user_fixture() + assert user.confirmed_at + {encoded_token, _hashed_token} = generate_user_magic_link_token(user) + assert {:ok, {^user, []}} = Accounts.login_user_by_magic_link(encoded_token) + # one time use only + assert {:error, :not_found} = Accounts.login_user_by_magic_link(encoded_token) + end + + test "raises when unconfirmed user has password set" do + user = unconfirmed_user_fixture() + + {count, nil} = + Repo.update_all(from(u in User, where: u.id == ^user.id), + set: [hashed_password: "hashed"] + ) + + assert count == 1 + {encoded_token, _hashed_token} = generate_user_magic_link_token(user) + + assert_raise RuntimeError, ~r/magic link log in is not allowed/, fn -> + Accounts.login_user_by_magic_link(encoded_token) + end + end + end + + describe "delete_user_session_token/1" do + test "deletes the token" do + user = user_fixture() + token = Accounts.generate_user_session_token(user) + assert Accounts.delete_user_session_token(token) == :ok + refute Accounts.get_user_by_session_token(token) + end + end + + describe "deliver_login_instructions/2" do + setup do + %{user: unconfirmed_user_fixture()} + end + + test "sends token through notification", %{user: user} do + token = + extract_user_token(fn url -> + Accounts.deliver_login_instructions(user, url) + end) + + {:ok, token} = Base.url_decode64(token, padding: false) + assert user_token = Repo.get_by(UserToken, token: :crypto.hash(:sha256, token)) + assert user_token.user_id == user.id + assert user_token.sent_to == user.email + assert user_token.context == "login" + end + end + + describe "inspect/2 for the User module" do + test "does not include password" do + refute inspect(%User{password: "123456"}) =~ "password: \"123456\"" + end + end +end diff --git a/web-ng/test/serviceradar_web_ng/infrastructure_test.exs b/web-ng/test/serviceradar_web_ng/infrastructure_test.exs new file mode 100644 index 000000000..235a09eef --- /dev/null +++ b/web-ng/test/serviceradar_web_ng/infrastructure_test.exs @@ -0,0 +1,28 @@ +defmodule ServiceRadarWebNG.InfrastructureTest do + use ServiceRadarWebNG.DataCase, async: true + + alias ServiceRadarWebNG.Infrastructure + alias ServiceRadarWebNG.Repo + + test "list_pollers returns pollers ordered by last_seen desc" do + Repo.insert_all("pollers", [ + %{ + poller_id: "test-poller-1", + last_seen: ~U[2025-01-01 00:00:00Z] + }, + %{ + poller_id: "test-poller-2", + last_seen: ~U[2025-02-01 00:00:00Z] + } + ]) + + pollers = Infrastructure.list_pollers(limit: 10) + ids = Enum.map(pollers, & &1.id) + + assert 
"test-poller-2" in ids + assert "test-poller-1" in ids + + assert Enum.find_index(ids, &(&1 == "test-poller-2")) < + Enum.find_index(ids, &(&1 == "test-poller-1")) + end +end diff --git a/web-ng/test/serviceradar_web_ng/inventory_test.exs b/web-ng/test/serviceradar_web_ng/inventory_test.exs new file mode 100644 index 000000000..acc139113 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng/inventory_test.exs @@ -0,0 +1,30 @@ +defmodule ServiceRadarWebNG.InventoryTest do + use ServiceRadarWebNG.DataCase, async: true + + alias ServiceRadarWebNG.Inventory + alias ServiceRadarWebNG.Repo + + test "list_devices returns devices ordered by last_seen desc" do + Repo.insert_all("unified_devices", [ + %{ + device_id: "test-device-1", + hostname: "a", + last_seen: ~U[2025-01-01 00:00:00Z] + }, + %{ + device_id: "test-device-2", + hostname: "b", + last_seen: ~U[2025-02-01 00:00:00Z] + } + ]) + + devices = Inventory.list_devices(limit: 10) + ids = Enum.map(devices, & &1.id) + + assert "test-device-2" in ids + assert "test-device-1" in ids + + assert Enum.find_index(ids, &(&1 == "test-device-2")) < + Enum.find_index(ids, &(&1 == "test-device-1")) + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/controllers/error_html_test.exs b/web-ng/test/serviceradar_web_ng_web/controllers/error_html_test.exs new file mode 100644 index 000000000..bad755dc2 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/controllers/error_html_test.exs @@ -0,0 +1,15 @@ +defmodule ServiceRadarWebNGWeb.ErrorHTMLTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + # Bring render_to_string/4 for testing custom views + import Phoenix.Template, only: [render_to_string: 4] + + test "renders 404.html" do + assert render_to_string(ServiceRadarWebNGWeb.ErrorHTML, "404", "html", []) == "Not Found" + end + + test "renders 500.html" do + assert render_to_string(ServiceRadarWebNGWeb.ErrorHTML, "500", "html", []) == + "Internal Server Error" + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/controllers/error_json_test.exs b/web-ng/test/serviceradar_web_ng_web/controllers/error_json_test.exs new file mode 100644 index 000000000..d80cdb1fb --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/controllers/error_json_test.exs @@ -0,0 +1,14 @@ +defmodule ServiceRadarWebNGWeb.ErrorJSONTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + test "renders 404" do + assert ServiceRadarWebNGWeb.ErrorJSON.render("404.json", %{}) == %{ + errors: %{detail: "Not Found"} + } + end + + test "renders 500" do + assert ServiceRadarWebNGWeb.ErrorJSON.render("500.json", %{}) == + %{errors: %{detail: "Internal Server Error"}} + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/controllers/page_controller_test.exs b/web-ng/test/serviceradar_web_ng_web/controllers/page_controller_test.exs new file mode 100644 index 000000000..2725182c0 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/controllers/page_controller_test.exs @@ -0,0 +1,8 @@ +defmodule ServiceRadarWebNGWeb.PageControllerTest do + use ServiceRadarWebNGWeb.ConnCase + + test "GET /", %{conn: conn} do + conn = get(conn, ~p"/") + assert html_response(conn, 200) =~ "Peace of mind from prototype to production" + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/controllers/user_session_controller_test.exs b/web-ng/test/serviceradar_web_ng_web/controllers/user_session_controller_test.exs new file mode 100644 index 000000000..54f88717a --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/controllers/user_session_controller_test.exs @@ -0,0 +1,147 @@ 
+defmodule ServiceRadarWebNGWeb.UserSessionControllerTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + import ServiceRadarWebNG.AccountsFixtures + alias ServiceRadarWebNG.Accounts + + setup do + %{unconfirmed_user: unconfirmed_user_fixture(), user: user_fixture()} + end + + describe "POST /users/log-in - email and password" do + test "logs the user in", %{conn: conn, user: user} do + user = set_password(user) + + conn = + post(conn, ~p"/users/log-in", %{ + "user" => %{"email" => user.email, "password" => valid_user_password()} + }) + + assert get_session(conn, :user_token) + assert redirected_to(conn) == ~p"/" + + # Now do a logged in request and assert on the menu + conn = get(conn, ~p"/dashboard") + response = html_response(conn, 200) + assert response =~ user.email + assert response =~ ~p"/users/settings" + assert response =~ ~p"/users/log-out" + end + + test "logs the user in with remember me", %{conn: conn, user: user} do + user = set_password(user) + + conn = + post(conn, ~p"/users/log-in", %{ + "user" => %{ + "email" => user.email, + "password" => valid_user_password(), + "remember_me" => "true" + } + }) + + assert conn.resp_cookies["_service_radar_web_ng_web_user_remember_me"] + assert redirected_to(conn) == ~p"/" + end + + test "logs the user in with return to", %{conn: conn, user: user} do + user = set_password(user) + + conn = + conn + |> init_test_session(user_return_to: "/foo/bar") + |> post(~p"/users/log-in", %{ + "user" => %{ + "email" => user.email, + "password" => valid_user_password() + } + }) + + assert redirected_to(conn) == "/foo/bar" + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "Welcome back!" + end + + test "redirects to login page with invalid credentials", %{conn: conn, user: user} do + conn = + post(conn, ~p"/users/log-in?mode=password", %{ + "user" => %{"email" => user.email, "password" => "invalid_password"} + }) + + assert Phoenix.Flash.get(conn.assigns.flash, :error) == "Invalid email or password" + assert redirected_to(conn) == ~p"/users/log-in" + end + end + + describe "POST /users/log-in - magic link" do + test "logs the user in", %{conn: conn, user: user} do + {token, _hashed_token} = generate_user_magic_link_token(user) + + conn = + post(conn, ~p"/users/log-in", %{ + "user" => %{"token" => token} + }) + + assert get_session(conn, :user_token) + assert redirected_to(conn) == ~p"/" + + # Now do a logged in request and assert on the menu + conn = get(conn, ~p"/dashboard") + response = html_response(conn, 200) + assert response =~ user.email + assert response =~ ~p"/users/settings" + assert response =~ ~p"/users/log-out" + end + + test "confirms unconfirmed user", %{conn: conn, unconfirmed_user: user} do + {token, _hashed_token} = generate_user_magic_link_token(user) + refute user.confirmed_at + + conn = + post(conn, ~p"/users/log-in", %{ + "user" => %{"token" => token}, + "_action" => "confirmed" + }) + + assert get_session(conn, :user_token) + assert redirected_to(conn) == ~p"/" + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "User confirmed successfully." 
+ + assert Accounts.get_user!(user.id).confirmed_at + + # Now do a logged in request and assert on the menu + conn = get(conn, ~p"/dashboard") + response = html_response(conn, 200) + assert response =~ user.email + assert response =~ ~p"/users/settings" + assert response =~ ~p"/users/log-out" + end + + test "redirects to login page when magic link is invalid", %{conn: conn} do + conn = + post(conn, ~p"/users/log-in", %{ + "user" => %{"token" => "invalid"} + }) + + assert Phoenix.Flash.get(conn.assigns.flash, :error) == + "The link is invalid or it has expired." + + assert redirected_to(conn) == ~p"/users/log-in" + end + end + + describe "DELETE /users/log-out" do + test "logs the user out", %{conn: conn, user: user} do + conn = conn |> log_in_user(user) |> delete(~p"/users/log-out") + assert redirected_to(conn) == ~p"/" + refute get_session(conn, :user_token) + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "Logged out successfully" + end + + test "succeeds even if the user is not logged in", %{conn: conn} do + conn = delete(conn, ~p"/users/log-out") + assert redirected_to(conn) == ~p"/" + refute get_session(conn, :user_token) + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "Logged out successfully" + end + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/dashboard_engine_test.exs b/web-ng/test/serviceradar_web_ng_web/dashboard_engine_test.exs new file mode 100644 index 000000000..32d2afb66 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/dashboard_engine_test.exs @@ -0,0 +1,44 @@ +defmodule ServiceRadarWebNGWeb.DashboardEngineTest do + use ExUnit.Case, async: true + + alias ServiceRadarWebNGWeb.Dashboard.Engine + alias ServiceRadarWebNGWeb.Dashboard.Plugins + + test "selects timeseries plugin when SRQL viz suggests timeseries" do + response = %{ + "results" => [ + %{"timestamp" => "2025-01-01T00:00:00Z", "series" => "cpu", "value" => 1.0}, + %{"timestamp" => "2025-01-01T00:01:00Z", "series" => "cpu", "value" => 2.0} + ], + "viz" => %{ + "suggestions" => [ + %{"kind" => "timeseries", "x" => "timestamp", "y" => "value", "series" => "series"} + ] + } + } + + panels = Engine.build_panels(response) + assert Enum.any?(panels, &(&1.plugin == Plugins.Timeseries)) + assert Enum.any?(panels, &(&1.plugin == Plugins.Table)) + + timeseries_panel = Enum.find(panels, &(&1.plugin == Plugins.Timeseries)) + assert is_map(timeseries_panel.assigns) + assert timeseries_panel.assigns.spec[:x] == "timestamp" + end + + test "selects topology plugin when graph payload includes nodes and edges" do + response = %{ + "results" => [%{"nodes" => [%{"id" => "n1", "label" => "Node"}], "edges" => []}], + "viz" => %{"columns" => [%{"name" => "result", "type" => "jsonb"}]} + } + + panels = Engine.build_panels(response) + assert Enum.any?(panels, &(&1.plugin == Plugins.Topology)) + assert Enum.any?(panels, &(&1.plugin == Plugins.Table)) + end + + test "falls back to table plugin when no other plugin matches" do + response = %{"results" => [%{"a" => 1}], "viz" => %{"suggestions" => [%{"kind" => "table"}]}} + assert [%{plugin: Plugins.Table}] = Engine.build_panels(response) + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/dashboard_topology_plugin_test.exs b/web-ng/test/serviceradar_web_ng_web/dashboard_topology_plugin_test.exs new file mode 100644 index 000000000..a00d5a886 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/dashboard_topology_plugin_test.exs @@ -0,0 +1,31 @@ +defmodule ServiceRadarWebNGWeb.DashboardTopologyPluginTest do + use ExUnit.Case, async: true + + alias 
ServiceRadarWebNGWeb.Dashboard.Plugins.Topology + + test "supports? detects graph payloads" do + assert Topology.supports?(%{"results" => [%{"nodes" => [], "edges" => []}]}) + refute Topology.supports?(%{"results" => [%{"a" => 1}]}) + end + + test "build merges nodes and edges across rows and normalizes ids" do + response = %{ + "results" => [ + %{ + "nodes" => [%{"device_id" => "dev-1", "hostname" => "device-1"}], + "edges" => [%{"source" => "dev-1", "target" => "dev-2", "type" => "links_to"}] + }, + %{ + "nodes" => [%{"id" => "dev-2", "label" => "device-2"}], + "edges" => [] + } + ] + } + + assert {:ok, assigns} = Topology.build(response) + assert length(assigns.nodes) == 2 + assert Enum.any?(assigns.nodes, &(&1.id == "dev-1")) + assert Enum.any?(assigns.nodes, &(&1.id == "dev-2")) + assert [%{source: "dev-1", target: "dev-2"}] = assigns.edges + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/live/device_live_test.exs b/web-ng/test/serviceradar_web_ng_web/live/device_live_test.exs new file mode 100644 index 000000000..40c767eb6 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/live/device_live_test.exs @@ -0,0 +1,27 @@ +defmodule ServiceRadarWebNGWeb.DeviceLiveTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + alias ServiceRadarWebNG.Repo + import Phoenix.LiveViewTest + + setup :register_and_log_in_user + + test "renders devices from unified_devices", %{conn: conn} do + device_id = "test-device-live-#{System.unique_integer([:positive])}" + + Repo.insert_all("unified_devices", [ + %{ + device_id: device_id, + hostname: "test-host", + is_available: true, + first_seen: ~U[2100-01-01 00:00:00Z], + last_seen: ~U[2100-01-01 00:00:00Z] + } + ]) + + {:ok, _lv, html} = live(conn, ~p"/devices?limit=10") + assert html =~ device_id + assert html =~ "test-host" + assert html =~ "in:devices" + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/live/poller_live_test.exs b/web-ng/test/serviceradar_web_ng_web/live/poller_live_test.exs new file mode 100644 index 000000000..679ed8ff0 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/live/poller_live_test.exs @@ -0,0 +1,25 @@ +defmodule ServiceRadarWebNGWeb.PollerLiveTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + alias ServiceRadarWebNG.Repo + import Phoenix.LiveViewTest + + setup :register_and_log_in_user + + test "renders pollers from pollers table", %{conn: conn} do + poller_id = "test-poller-live-#{System.unique_integer([:positive])}" + + Repo.insert_all("pollers", [ + %{ + poller_id: poller_id, + last_seen: ~U[2100-01-01 00:00:00Z], + status: "active" + } + ]) + + {:ok, _lv, html} = live(conn, ~p"/pollers?limit=10") + assert html =~ poller_id + assert html =~ "active" + assert html =~ "in:pollers" + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/live/user_live/confirmation_test.exs b/web-ng/test/serviceradar_web_ng_web/live/user_live/confirmation_test.exs new file mode 100644 index 000000000..540d540d8 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/live/user_live/confirmation_test.exs @@ -0,0 +1,118 @@ +defmodule ServiceRadarWebNGWeb.UserLive.ConfirmationTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + import Phoenix.LiveViewTest + import ServiceRadarWebNG.AccountsFixtures + + alias ServiceRadarWebNG.Accounts + + setup do + %{unconfirmed_user: unconfirmed_user_fixture(), confirmed_user: user_fixture()} + end + + describe "Confirm user" do + test "renders confirmation page for unconfirmed user", %{conn: conn, unconfirmed_user: user} do + token = + extract_user_token(fn 
url -> + Accounts.deliver_login_instructions(user, url) + end) + + {:ok, _lv, html} = live(conn, ~p"/users/log-in/#{token}") + assert html =~ "Confirm and stay logged in" + end + + test "renders login page for confirmed user", %{conn: conn, confirmed_user: user} do + token = + extract_user_token(fn url -> + Accounts.deliver_login_instructions(user, url) + end) + + {:ok, _lv, html} = live(conn, ~p"/users/log-in/#{token}") + refute html =~ "Confirm my account" + assert html =~ "Keep me logged in on this device" + end + + test "renders login page for already logged in user", %{conn: conn, confirmed_user: user} do + conn = log_in_user(conn, user) + + token = + extract_user_token(fn url -> + Accounts.deliver_login_instructions(user, url) + end) + + {:ok, _lv, html} = live(conn, ~p"/users/log-in/#{token}") + refute html =~ "Confirm my account" + assert html =~ "Log in" + end + + test "confirms the given token once", %{conn: conn, unconfirmed_user: user} do + token = + extract_user_token(fn url -> + Accounts.deliver_login_instructions(user, url) + end) + + {:ok, lv, _html} = live(conn, ~p"/users/log-in/#{token}") + + form = form(lv, "#confirmation_form", %{"user" => %{"token" => token}}) + render_submit(form) + + conn = follow_trigger_action(form, conn) + + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ + "User confirmed successfully" + + assert Accounts.get_user!(user.id).confirmed_at + # we are logged in now + assert get_session(conn, :user_token) + assert redirected_to(conn) == ~p"/" + + # log out, new conn + conn = build_conn() + + {:ok, _lv, html} = + live(conn, ~p"/users/log-in/#{token}") + |> follow_redirect(conn, ~p"/users/log-in") + + assert html =~ "Magic link is invalid or it has expired" + end + + test "logs confirmed user in without changing confirmed_at", %{ + conn: conn, + confirmed_user: user + } do + token = + extract_user_token(fn url -> + Accounts.deliver_login_instructions(user, url) + end) + + {:ok, lv, _html} = live(conn, ~p"/users/log-in/#{token}") + + form = form(lv, "#login_form", %{"user" => %{"token" => token}}) + render_submit(form) + + conn = follow_trigger_action(form, conn) + + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ + "Welcome back!" 
+ + assert Accounts.get_user!(user.id).confirmed_at == user.confirmed_at + + # log out, new conn + conn = build_conn() + + {:ok, _lv, html} = + live(conn, ~p"/users/log-in/#{token}") + |> follow_redirect(conn, ~p"/users/log-in") + + assert html =~ "Magic link is invalid or it has expired" + end + + test "raises error for invalid token", %{conn: conn} do + {:ok, _lv, html} = + live(conn, ~p"/users/log-in/invalid-token") + |> follow_redirect(conn, ~p"/users/log-in") + + assert html =~ "Magic link is invalid or it has expired" + end + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/live/user_live/login_test.exs b/web-ng/test/serviceradar_web_ng_web/live/user_live/login_test.exs new file mode 100644 index 000000000..546615af1 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/live/user_live/login_test.exs @@ -0,0 +1,111 @@ +defmodule ServiceRadarWebNGWeb.UserLive.LoginTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + import Phoenix.LiveViewTest + import ServiceRadarWebNG.AccountsFixtures + + describe "login page" do + test "renders login page", %{conn: conn} do + {:ok, _lv, html} = live(conn, ~p"/users/log-in") + + assert html =~ "Log in" + assert html =~ "Register" + assert html =~ "Log in with email" + end + end + + describe "user login - magic link" do + test "sends magic link email when user exists", %{conn: conn} do + user = user_fixture() + + {:ok, lv, _html} = live(conn, ~p"/users/log-in") + + {:ok, _lv, html} = + form(lv, "#login_form_magic", user: %{email: user.email}) + |> render_submit() + |> follow_redirect(conn, ~p"/users/log-in") + + assert html =~ "If your email is in our system" + + assert ServiceRadarWebNG.Repo.get_by!(ServiceRadarWebNG.Accounts.UserToken, + user_id: user.id + ).context == + "login" + end + + test "does not disclose if user is registered", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/log-in") + + {:ok, _lv, html} = + form(lv, "#login_form_magic", user: %{email: "idonotexist@example.com"}) + |> render_submit() + |> follow_redirect(conn, ~p"/users/log-in") + + assert html =~ "If your email is in our system" + end + end + + describe "user login - password" do + test "redirects if user logs in with valid credentials", %{conn: conn} do + user = user_fixture() |> set_password() + + {:ok, lv, _html} = live(conn, ~p"/users/log-in") + + form = + form(lv, "#login_form_password", + user: %{email: user.email, password: valid_user_password(), remember_me: true} + ) + + conn = submit_form(form, conn) + + assert redirected_to(conn) == ~p"/" + end + + test "redirects to login page with a flash error if credentials are invalid", %{ + conn: conn + } do + {:ok, lv, _html} = live(conn, ~p"/users/log-in") + + form = + form(lv, "#login_form_password", user: %{email: "test@email.com", password: "123456"}) + + render_submit(form, %{user: %{remember_me: true}}) + + conn = follow_trigger_action(form, conn) + assert Phoenix.Flash.get(conn.assigns.flash, :error) == "Invalid email or password" + assert redirected_to(conn) == ~p"/users/log-in" + end + end + + describe "login navigation" do + test "redirects to registration page when the Register button is clicked", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/log-in") + + {:ok, _login_live, login_html} = + lv + |> element("main a", "Sign up") + |> render_click() + |> follow_redirect(conn, ~p"/users/register") + + assert login_html =~ "Register" + end + end + + describe "re-authentication (sudo mode)" do + setup %{conn: conn} do + user = user_fixture() + %{user: user, conn: 
log_in_user(conn, user)} + end + + test "shows login page with email filled in", %{conn: conn, user: user} do + {:ok, _lv, html} = live(conn, ~p"/users/log-in") + + assert html =~ "You need to reauthenticate" + refute html =~ "Register" + assert html =~ "Log in with email" + + assert html =~ + ~s( log_in_user(user_fixture()) + |> live(~p"/users/register") + |> follow_redirect(conn, ~p"/") + + assert {:ok, _conn} = result + end + + test "renders errors for invalid data", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/register") + + result = + lv + |> element("#registration_form") + |> render_change(user: %{"email" => "with spaces"}) + + assert result =~ "Register" + assert result =~ "must have the @ sign and no spaces" + end + end + + describe "register user" do + test "creates account but does not log in", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/register") + + email = unique_user_email() + form = form(lv, "#registration_form", user: valid_user_attributes(email: email)) + + {:ok, _lv, html} = + render_submit(form) + |> follow_redirect(conn, ~p"/users/log-in") + + assert html =~ + ~r/An email was sent to .*, please access it to confirm your account/ + end + + test "renders errors for duplicated email", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/register") + + user = user_fixture(%{email: "test@email.com"}) + + result = + lv + |> form("#registration_form", + user: %{"email" => user.email} + ) + |> render_submit() + + assert result =~ "has already been taken" + end + end + + describe "registration navigation" do + test "redirects to login page when the Log in button is clicked", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/register") + + {:ok, _login_live, login_html} = + lv + |> element("main a", "Log in") + |> render_click() + |> follow_redirect(conn, ~p"/users/log-in") + + assert login_html =~ "Log in" + end + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/live/user_live/settings_test.exs b/web-ng/test/serviceradar_web_ng_web/live/user_live/settings_test.exs new file mode 100644 index 000000000..dad4e0dea --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/live/user_live/settings_test.exs @@ -0,0 +1,212 @@ +defmodule ServiceRadarWebNGWeb.UserLive.SettingsTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + alias ServiceRadarWebNG.Accounts + import Phoenix.LiveViewTest + import ServiceRadarWebNG.AccountsFixtures + + describe "Settings page" do + test "renders settings page", %{conn: conn} do + {:ok, _lv, html} = + conn + |> log_in_user(user_fixture()) + |> live(~p"/users/settings") + + assert html =~ "Change Email" + assert html =~ "Save Password" + end + + test "redirects if user is not logged in", %{conn: conn} do + assert {:error, redirect} = live(conn, ~p"/users/settings") + + assert {:redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"/users/log-in" + assert %{"error" => "You must log in to access this page."} = flash + end + + test "redirects if user is not in sudo mode", %{conn: conn} do + {:ok, conn} = + conn + |> log_in_user(user_fixture(), + token_authenticated_at: DateTime.add(DateTime.utc_now(:second), -11, :minute) + ) + |> live(~p"/users/settings") + |> follow_redirect(conn, ~p"/users/log-in") + + assert conn.resp_body =~ "You must re-authenticate to access this page." 
+ end + end + + describe "update email form" do + setup %{conn: conn} do + user = user_fixture() + %{conn: log_in_user(conn, user), user: user} + end + + test "updates the user email", %{conn: conn, user: user} do + new_email = unique_user_email() + + {:ok, lv, _html} = live(conn, ~p"/users/settings") + + result = + lv + |> form("#email_form", %{ + "user" => %{"email" => new_email} + }) + |> render_submit() + + assert result =~ "A link to confirm your email" + assert Accounts.get_user_by_email(user.email) + end + + test "renders errors with invalid data (phx-change)", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/settings") + + result = + lv + |> element("#email_form") + |> render_change(%{ + "action" => "update_email", + "user" => %{"email" => "with spaces"} + }) + + assert result =~ "Change Email" + assert result =~ "must have the @ sign and no spaces" + end + + test "renders errors with invalid data (phx-submit)", %{conn: conn, user: user} do + {:ok, lv, _html} = live(conn, ~p"/users/settings") + + result = + lv + |> form("#email_form", %{ + "user" => %{"email" => user.email} + }) + |> render_submit() + + assert result =~ "Change Email" + assert result =~ "did not change" + end + end + + describe "update password form" do + setup %{conn: conn} do + user = user_fixture() + %{conn: log_in_user(conn, user), user: user} + end + + test "updates the user password", %{conn: conn, user: user} do + new_password = valid_user_password() + + {:ok, lv, _html} = live(conn, ~p"/users/settings") + + form = + form(lv, "#password_form", %{ + "user" => %{ + "email" => user.email, + "password" => new_password, + "password_confirmation" => new_password + } + }) + + render_submit(form) + + new_password_conn = follow_trigger_action(form, conn) + + assert redirected_to(new_password_conn) == ~p"/users/settings" + + assert get_session(new_password_conn, :user_token) != get_session(conn, :user_token) + + assert Phoenix.Flash.get(new_password_conn.assigns.flash, :info) =~ + "Password updated successfully" + + assert Accounts.get_user_by_email_and_password(user.email, new_password) + end + + test "renders errors with invalid data (phx-change)", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/settings") + + result = + lv + |> element("#password_form") + |> render_change(%{ + "user" => %{ + "password" => "too short", + "password_confirmation" => "does not match" + } + }) + + assert result =~ "Save Password" + assert result =~ "should be at least 12 character(s)" + assert result =~ "does not match password" + end + + test "renders errors with invalid data (phx-submit)", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"/users/settings") + + result = + lv + |> form("#password_form", %{ + "user" => %{ + "password" => "too short", + "password_confirmation" => "does not match" + } + }) + |> render_submit() + + assert result =~ "Save Password" + assert result =~ "should be at least 12 character(s)" + assert result =~ "does not match password" + end + end + + describe "confirm email" do + setup %{conn: conn} do + user = user_fixture() + email = unique_user_email() + + token = + extract_user_token(fn url -> + Accounts.deliver_user_update_email_instructions(%{user | email: email}, user.email, url) + end) + + %{conn: log_in_user(conn, user), token: token, email: email, user: user} + end + + test "updates the user email once", %{conn: conn, user: user, token: token, email: email} do + {:error, redirect} = live(conn, ~p"/users/settings/confirm-email/#{token}") + + assert {:live_redirect, %{to: path, 
flash: flash}} = redirect + assert path == ~p"/users/settings" + assert %{"info" => message} = flash + assert message == "Email changed successfully." + refute Accounts.get_user_by_email(user.email) + assert Accounts.get_user_by_email(email) + + # use confirm token again + {:error, redirect} = live(conn, ~p"/users/settings/confirm-email/#{token}") + assert {:live_redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"/users/settings" + assert %{"error" => message} = flash + assert message == "Email change link is invalid or it has expired." + end + + test "does not update email with invalid token", %{conn: conn, user: user} do + {:error, redirect} = live(conn, ~p"/users/settings/confirm-email/oops") + assert {:live_redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"/users/settings" + assert %{"error" => message} = flash + assert message == "Email change link is invalid or it has expired." + assert Accounts.get_user_by_email(user.email) + end + + test "redirects if user is not logged in", %{token: token} do + conn = build_conn() + {:error, redirect} = live(conn, ~p"/users/settings/confirm-email/#{token}") + assert {:redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"/users/log-in" + assert %{"error" => message} = flash + assert message == "You must log in to access this page." + end + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/srql_builder_downsample_test.exs b/web-ng/test/serviceradar_web_ng_web/srql_builder_downsample_test.exs new file mode 100644 index 000000000..a59143ee2 --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/srql_builder_downsample_test.exs @@ -0,0 +1,38 @@ +defmodule ServiceRadarWebNGWeb.SRQLBuilderDownsampleTest do + use ExUnit.Case, async: true + + alias ServiceRadarWebNGWeb.SRQL.Builder + + test "builds downsample tokens for timeseries metrics" do + state = + Builder.default_state("timeseries_metrics", 100) + |> Map.put("filters", []) + |> Map.put("time", "last_24h") + |> Map.put("bucket", "5m") + |> Map.put("agg", "avg") + |> Map.put("series", "metric_name") + + query = Builder.build(state) + assert query =~ "in:timeseries_metrics" + assert query =~ "time:last_24h" + assert query =~ "bucket:5m" + assert query =~ "agg:avg" + assert query =~ "series:metric_name" + end + + test "parses downsample tokens for cpu metrics" do + query = "in:cpu_metrics time:last_1h bucket:15s agg:max series:core_id limit:50" + assert {:ok, builder} = Builder.parse(query) + assert builder["entity"] == "cpu_metrics" + assert builder["time"] == "last_1h" + assert builder["bucket"] == "15s" + assert builder["agg"] == "max" + assert builder["series"] == "core_id" + assert builder["limit"] == 50 + end + + test "rejects downsample tokens for non-metric entities" do + query = "in:devices time:last_24h bucket:5m agg:avg series:device_id limit:10" + assert {:error, :downsample_not_supported} = Builder.parse(query) + end +end diff --git a/web-ng/test/serviceradar_web_ng_web/user_auth_test.exs b/web-ng/test/serviceradar_web_ng_web/user_auth_test.exs new file mode 100644 index 000000000..d7353044c --- /dev/null +++ b/web-ng/test/serviceradar_web_ng_web/user_auth_test.exs @@ -0,0 +1,390 @@ +defmodule ServiceRadarWebNGWeb.UserAuthTest do + use ServiceRadarWebNGWeb.ConnCase, async: true + + alias Phoenix.LiveView + alias ServiceRadarWebNG.Accounts + alias ServiceRadarWebNG.Accounts.Scope + alias ServiceRadarWebNGWeb.UserAuth + + import ServiceRadarWebNG.AccountsFixtures + + @remember_me_cookie "_service_radar_web_ng_web_user_remember_me" + 
@remember_me_cookie_max_age 60 * 60 * 24 * 14 + + setup %{conn: conn} do + conn = + conn + |> Map.replace!(:secret_key_base, ServiceRadarWebNGWeb.Endpoint.config(:secret_key_base)) + |> init_test_session(%{}) + + %{user: %{user_fixture() | authenticated_at: DateTime.utc_now(:second)}, conn: conn} + end + + describe "log_in_user/3" do + test "stores the user token in the session", %{conn: conn, user: user} do + conn = UserAuth.log_in_user(conn, user) + assert token = get_session(conn, :user_token) + assert get_session(conn, :live_socket_id) == "users_sessions:#{Base.url_encode64(token)}" + assert redirected_to(conn) == ~p"/" + assert Accounts.get_user_by_session_token(token) + end + + test "clears everything previously stored in the session", %{conn: conn, user: user} do + conn = conn |> put_session(:to_be_removed, "value") |> UserAuth.log_in_user(user) + refute get_session(conn, :to_be_removed) + end + + test "keeps session when re-authenticating", %{conn: conn, user: user} do + conn = + conn + |> assign(:current_scope, Scope.for_user(user)) + |> put_session(:to_be_removed, "value") + |> UserAuth.log_in_user(user) + + assert get_session(conn, :to_be_removed) + end + + test "clears session when user does not match when re-authenticating", %{ + conn: conn, + user: user + } do + other_user = user_fixture() + + conn = + conn + |> assign(:current_scope, Scope.for_user(other_user)) + |> put_session(:to_be_removed, "value") + |> UserAuth.log_in_user(user) + + refute get_session(conn, :to_be_removed) + end + + test "redirects to the configured path", %{conn: conn, user: user} do + conn = conn |> put_session(:user_return_to, "/hello") |> UserAuth.log_in_user(user) + assert redirected_to(conn) == "/hello" + end + + test "writes a cookie if remember_me is configured", %{conn: conn, user: user} do + conn = conn |> fetch_cookies() |> UserAuth.log_in_user(user, %{"remember_me" => "true"}) + assert get_session(conn, :user_token) == conn.cookies[@remember_me_cookie] + assert get_session(conn, :user_remember_me) == true + + assert %{value: signed_token, max_age: max_age} = conn.resp_cookies[@remember_me_cookie] + assert signed_token != get_session(conn, :user_token) + assert max_age == @remember_me_cookie_max_age + end + + test "redirects to settings when user is already logged in", %{conn: conn, user: user} do + conn = + conn + |> assign(:current_scope, Scope.for_user(user)) + |> UserAuth.log_in_user(user) + + assert redirected_to(conn) == ~p"/dashboard" + end + + test "writes a cookie if remember_me was set in previous session", %{conn: conn, user: user} do + conn = conn |> fetch_cookies() |> UserAuth.log_in_user(user, %{"remember_me" => "true"}) + assert get_session(conn, :user_token) == conn.cookies[@remember_me_cookie] + assert get_session(conn, :user_remember_me) == true + + conn = + conn + |> recycle() + |> Map.replace!(:secret_key_base, ServiceRadarWebNGWeb.Endpoint.config(:secret_key_base)) + |> fetch_cookies() + |> init_test_session(%{user_remember_me: true}) + + # the conn is already logged in and has the remember_me cookie set, + # now we log in again and even without explicitly setting remember_me, + # the cookie should be set again + conn = conn |> UserAuth.log_in_user(user, %{}) + assert %{value: signed_token, max_age: max_age} = conn.resp_cookies[@remember_me_cookie] + assert signed_token != get_session(conn, :user_token) + assert max_age == @remember_me_cookie_max_age + assert get_session(conn, :user_remember_me) == true + end + end + + describe "logout_user/1" do + test "erases session 
and cookies", %{conn: conn, user: user} do + user_token = Accounts.generate_user_session_token(user) + + conn = + conn + |> put_session(:user_token, user_token) + |> put_req_cookie(@remember_me_cookie, user_token) + |> fetch_cookies() + |> UserAuth.log_out_user() + + refute get_session(conn, :user_token) + refute conn.cookies[@remember_me_cookie] + assert %{max_age: 0} = conn.resp_cookies[@remember_me_cookie] + assert redirected_to(conn) == ~p"/" + refute Accounts.get_user_by_session_token(user_token) + end + + test "broadcasts to the given live_socket_id", %{conn: conn} do + live_socket_id = "users_sessions:abcdef-token" + ServiceRadarWebNGWeb.Endpoint.subscribe(live_socket_id) + + conn + |> put_session(:live_socket_id, live_socket_id) + |> UserAuth.log_out_user() + + assert_receive %Phoenix.Socket.Broadcast{event: "disconnect", topic: ^live_socket_id} + end + + test "works even if user is already logged out", %{conn: conn} do + conn = conn |> fetch_cookies() |> UserAuth.log_out_user() + refute get_session(conn, :user_token) + assert %{max_age: 0} = conn.resp_cookies[@remember_me_cookie] + assert redirected_to(conn) == ~p"/" + end + end + + describe "fetch_current_scope_for_user/2" do + test "authenticates user from session", %{conn: conn, user: user} do + user_token = Accounts.generate_user_session_token(user) + + conn = + conn |> put_session(:user_token, user_token) |> UserAuth.fetch_current_scope_for_user([]) + + assert conn.assigns.current_scope.user.id == user.id + assert conn.assigns.current_scope.user.authenticated_at == user.authenticated_at + assert get_session(conn, :user_token) == user_token + end + + test "authenticates user from cookies", %{conn: conn, user: user} do + logged_in_conn = + conn |> fetch_cookies() |> UserAuth.log_in_user(user, %{"remember_me" => "true"}) + + user_token = logged_in_conn.cookies[@remember_me_cookie] + %{value: signed_token} = logged_in_conn.resp_cookies[@remember_me_cookie] + + conn = + conn + |> put_req_cookie(@remember_me_cookie, signed_token) + |> UserAuth.fetch_current_scope_for_user([]) + + assert conn.assigns.current_scope.user.id == user.id + assert conn.assigns.current_scope.user.authenticated_at == user.authenticated_at + assert get_session(conn, :user_token) == user_token + assert get_session(conn, :user_remember_me) + + assert get_session(conn, :live_socket_id) == + "users_sessions:#{Base.url_encode64(user_token)}" + end + + test "does not authenticate if data is missing", %{conn: conn, user: user} do + _ = Accounts.generate_user_session_token(user) + conn = UserAuth.fetch_current_scope_for_user(conn, []) + refute get_session(conn, :user_token) + refute conn.assigns.current_scope + end + + test "reissues a new token after a few days and refreshes cookie", %{conn: conn, user: user} do + logged_in_conn = + conn |> fetch_cookies() |> UserAuth.log_in_user(user, %{"remember_me" => "true"}) + + token = logged_in_conn.cookies[@remember_me_cookie] + %{value: signed_token} = logged_in_conn.resp_cookies[@remember_me_cookie] + + offset_user_token(token, -10, :day) + {user, _} = Accounts.get_user_by_session_token(token) + + conn = + conn + |> put_session(:user_token, token) + |> put_session(:user_remember_me, true) + |> put_req_cookie(@remember_me_cookie, signed_token) + |> UserAuth.fetch_current_scope_for_user([]) + + assert conn.assigns.current_scope.user.id == user.id + assert conn.assigns.current_scope.user.authenticated_at == user.authenticated_at + assert new_token = get_session(conn, :user_token) + assert new_token != token + assert %{value: 
new_signed_token, max_age: max_age} = conn.resp_cookies[@remember_me_cookie] + assert new_signed_token != signed_token + assert max_age == @remember_me_cookie_max_age + end + end + + describe "on_mount :mount_current_scope" do + setup %{conn: conn} do + %{conn: UserAuth.fetch_current_scope_for_user(conn, [])} + end + + test "assigns current_scope based on a valid user_token", %{conn: conn, user: user} do + user_token = Accounts.generate_user_session_token(user) + session = conn |> put_session(:user_token, user_token) |> get_session() + + {:cont, updated_socket} = + UserAuth.on_mount(:mount_current_scope, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.current_scope.user.id == user.id + end + + test "assigns nil to current_scope assign if there isn't a valid user_token", %{conn: conn} do + user_token = "invalid_token" + session = conn |> put_session(:user_token, user_token) |> get_session() + + {:cont, updated_socket} = + UserAuth.on_mount(:mount_current_scope, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.current_scope == nil + end + + test "assigns nil to current_scope assign if there isn't a user_token", %{conn: conn} do + session = conn |> get_session() + + {:cont, updated_socket} = + UserAuth.on_mount(:mount_current_scope, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.current_scope == nil + end + end + + describe "on_mount :require_authenticated" do + test "authenticates current_scope based on a valid user_token", %{conn: conn, user: user} do + user_token = Accounts.generate_user_session_token(user) + session = conn |> put_session(:user_token, user_token) |> get_session() + + {:cont, updated_socket} = + UserAuth.on_mount(:require_authenticated, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.current_scope.user.id == user.id + end + + test "redirects to login page if there isn't a valid user_token", %{conn: conn} do + user_token = "invalid_token" + session = conn |> put_session(:user_token, user_token) |> get_session() + + socket = %LiveView.Socket{ + endpoint: ServiceRadarWebNGWeb.Endpoint, + assigns: %{__changed__: %{}, flash: %{}} + } + + {:halt, updated_socket} = UserAuth.on_mount(:require_authenticated, %{}, session, socket) + assert updated_socket.assigns.current_scope == nil + end + + test "redirects to login page if there isn't a user_token", %{conn: conn} do + session = conn |> get_session() + + socket = %LiveView.Socket{ + endpoint: ServiceRadarWebNGWeb.Endpoint, + assigns: %{__changed__: %{}, flash: %{}} + } + + {:halt, updated_socket} = UserAuth.on_mount(:require_authenticated, %{}, session, socket) + assert updated_socket.assigns.current_scope == nil + end + end + + describe "on_mount :require_sudo_mode" do + test "allows users that have authenticated in the last 10 minutes", %{conn: conn, user: user} do + user_token = Accounts.generate_user_session_token(user) + session = conn |> put_session(:user_token, user_token) |> get_session() + + socket = %LiveView.Socket{ + endpoint: ServiceRadarWebNGWeb.Endpoint, + assigns: %{__changed__: %{}, flash: %{}} + } + + assert {:cont, _updated_socket} = + UserAuth.on_mount(:require_sudo_mode, %{}, session, socket) + end + + test "redirects when authentication is too old", %{conn: conn, user: user} do + eleven_minutes_ago = DateTime.utc_now(:second) |> DateTime.add(-11, :minute) + user = %{user | authenticated_at: eleven_minutes_ago} + user_token = Accounts.generate_user_session_token(user) + {user, token_inserted_at} = 
Accounts.get_user_by_session_token(user_token) + assert DateTime.compare(token_inserted_at, user.authenticated_at) == :gt + session = conn |> put_session(:user_token, user_token) |> get_session() + + socket = %LiveView.Socket{ + endpoint: ServiceRadarWebNGWeb.Endpoint, + assigns: %{__changed__: %{}, flash: %{}} + } + + assert {:halt, _updated_socket} = + UserAuth.on_mount(:require_sudo_mode, %{}, session, socket) + end + end + + describe "require_authenticated_user/2" do + setup %{conn: conn} do + %{conn: UserAuth.fetch_current_scope_for_user(conn, [])} + end + + test "redirects if user is not authenticated", %{conn: conn} do + conn = conn |> fetch_flash() |> UserAuth.require_authenticated_user([]) + assert conn.halted + + assert redirected_to(conn) == ~p"/users/log-in" + + assert Phoenix.Flash.get(conn.assigns.flash, :error) == + "You must log in to access this page." + end + + test "stores the path to redirect to on GET", %{conn: conn} do + halted_conn = + %{conn | path_info: ["foo"], query_string: ""} + |> fetch_flash() + |> UserAuth.require_authenticated_user([]) + + assert halted_conn.halted + assert get_session(halted_conn, :user_return_to) == "/foo" + + halted_conn = + %{conn | path_info: ["foo"], query_string: "bar=baz"} + |> fetch_flash() + |> UserAuth.require_authenticated_user([]) + + assert halted_conn.halted + assert get_session(halted_conn, :user_return_to) == "/foo?bar=baz" + + halted_conn = + %{conn | path_info: ["foo"], query_string: "bar", method: "POST"} + |> fetch_flash() + |> UserAuth.require_authenticated_user([]) + + assert halted_conn.halted + refute get_session(halted_conn, :user_return_to) + end + + test "does not redirect if user is authenticated", %{conn: conn, user: user} do + conn = + conn + |> assign(:current_scope, Scope.for_user(user)) + |> UserAuth.require_authenticated_user([]) + + refute conn.halted + refute conn.status + end + end + + describe "disconnect_sessions/1" do + test "broadcasts disconnect messages for each token" do + tokens = [%{token: "token1"}, %{token: "token2"}] + + for %{token: token} <- tokens do + ServiceRadarWebNGWeb.Endpoint.subscribe("users_sessions:#{Base.url_encode64(token)}") + end + + UserAuth.disconnect_sessions(tokens) + + assert_receive %Phoenix.Socket.Broadcast{ + event: "disconnect", + topic: "users_sessions:dG9rZW4x" + } + + assert_receive %Phoenix.Socket.Broadcast{ + event: "disconnect", + topic: "users_sessions:dG9rZW4y" + } + end + end +end diff --git a/web-ng/test/support/conn_case.ex b/web-ng/test/support/conn_case.ex new file mode 100644 index 000000000..99e982b08 --- /dev/null +++ b/web-ng/test/support/conn_case.ex @@ -0,0 +1,79 @@ +defmodule ServiceRadarWebNGWeb.ConnCase do + @moduledoc """ + This module defines the test case to be used by + tests that require setting up a connection. + + Such tests rely on `Phoenix.ConnTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use ServiceRadarWebNGWeb.ConnCase, async: true`, although + this option is not recommended for other databases. 
+ """ + + use ExUnit.CaseTemplate + + using do + quote do + # The default endpoint for testing + @endpoint ServiceRadarWebNGWeb.Endpoint + + use ServiceRadarWebNGWeb, :verified_routes + + # Import conveniences for testing with connections + import Plug.Conn + import Phoenix.ConnTest + import ServiceRadarWebNGWeb.ConnCase + end + end + + setup tags do + ServiceRadarWebNG.DataCase.setup_sandbox(tags) + {:ok, conn: Phoenix.ConnTest.build_conn()} + end + + @doc """ + Setup helper that registers and logs in users. + + setup :register_and_log_in_user + + It stores an updated connection and a registered user in the + test context. + """ + def register_and_log_in_user(%{conn: conn} = context) do + user = ServiceRadarWebNG.AccountsFixtures.user_fixture() + scope = ServiceRadarWebNG.Accounts.Scope.for_user(user) + + opts = + context + |> Map.take([:token_authenticated_at]) + |> Enum.into([]) + + %{conn: log_in_user(conn, user, opts), user: user, scope: scope} + end + + @doc """ + Logs the given `user` into the `conn`. + + It returns an updated `conn`. + """ + def log_in_user(conn, user, opts \\ []) do + token = ServiceRadarWebNG.Accounts.generate_user_session_token(user) + + maybe_set_token_authenticated_at(token, opts[:token_authenticated_at]) + + conn + |> Phoenix.ConnTest.init_test_session(%{}) + |> Plug.Conn.put_session(:user_token, token) + end + + defp maybe_set_token_authenticated_at(_token, nil), do: nil + + defp maybe_set_token_authenticated_at(token, authenticated_at) do + ServiceRadarWebNG.AccountsFixtures.override_token_authenticated_at(token, authenticated_at) + end +end diff --git a/web-ng/test/support/data_case.ex b/web-ng/test/support/data_case.ex new file mode 100644 index 000000000..cbfd3cfec --- /dev/null +++ b/web-ng/test/support/data_case.ex @@ -0,0 +1,58 @@ +defmodule ServiceRadarWebNG.DataCase do + @moduledoc """ + This module defines the setup for tests requiring + access to the application's data layer. + + You may define functions here to be used as helpers in + your tests. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use ServiceRadarWebNG.DataCase, async: true`, although + this option is not recommended for other databases. + """ + + use ExUnit.CaseTemplate + + using do + quote do + alias ServiceRadarWebNG.Repo + + import Ecto + import Ecto.Changeset + import Ecto.Query + import ServiceRadarWebNG.DataCase + end + end + + setup tags do + ServiceRadarWebNG.DataCase.setup_sandbox(tags) + :ok + end + + @doc """ + Sets up the sandbox based on the test tags. + """ + def setup_sandbox(tags) do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(ServiceRadarWebNG.Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + end + + @doc """ + A helper that transforms changeset errors into a map of messages. 
+ + assert {:error, changeset} = Accounts.create_user(%{password: "short"}) + assert "password is too short" in errors_on(changeset).password + assert %{password: ["password is too short"]} = errors_on(changeset) + + """ + def errors_on(changeset) do + Ecto.Changeset.traverse_errors(changeset, fn {message, opts} -> + Regex.replace(~r"%{(\w+)}", message, fn _, key -> + opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() + end) + end) + end +end diff --git a/web-ng/test/support/fixtures/accounts_fixtures.ex b/web-ng/test/support/fixtures/accounts_fixtures.ex new file mode 100644 index 000000000..9c2d15b5b --- /dev/null +++ b/web-ng/test/support/fixtures/accounts_fixtures.ex @@ -0,0 +1,89 @@ +defmodule ServiceRadarWebNG.AccountsFixtures do + @moduledoc """ + This module defines test helpers for creating + entities via the `ServiceRadarWebNG.Accounts` context. + """ + + import Ecto.Query + + alias ServiceRadarWebNG.Accounts + alias ServiceRadarWebNG.Accounts.Scope + + def unique_user_email, do: "user#{System.unique_integer()}@example.com" + def valid_user_password, do: "hello world!" + + def valid_user_attributes(attrs \\ %{}) do + Enum.into(attrs, %{ + email: unique_user_email() + }) + end + + def unconfirmed_user_fixture(attrs \\ %{}) do + {:ok, user} = + attrs + |> valid_user_attributes() + |> Accounts.register_user() + + user + end + + def user_fixture(attrs \\ %{}) do + user = unconfirmed_user_fixture(attrs) + + token = + extract_user_token(fn url -> + Accounts.deliver_login_instructions(user, url) + end) + + {:ok, {user, _expired_tokens}} = + Accounts.login_user_by_magic_link(token) + + user + end + + def user_scope_fixture do + user = user_fixture() + user_scope_fixture(user) + end + + def user_scope_fixture(user) do + Scope.for_user(user) + end + + def set_password(user) do + {:ok, {user, _expired_tokens}} = + Accounts.update_user_password(user, %{password: valid_user_password()}) + + user + end + + def extract_user_token(fun) do + {:ok, captured_email} = fun.(&"[TOKEN]#{&1}[TOKEN]") + [_, token | _] = String.split(captured_email.text_body, "[TOKEN]") + token + end + + def override_token_authenticated_at(token, authenticated_at) when is_binary(token) do + ServiceRadarWebNG.Repo.update_all( + from(t in Accounts.UserToken, + where: t.token == ^token + ), + set: [authenticated_at: authenticated_at] + ) + end + + def generate_user_magic_link_token(user) do + {encoded_token, user_token} = Accounts.UserToken.build_email_token(user, "login") + ServiceRadarWebNG.Repo.insert!(user_token) + {encoded_token, user_token.token} + end + + def offset_user_token(token, amount_to_add, unit) do + dt = DateTime.add(DateTime.utc_now(:second), amount_to_add, unit) + + ServiceRadarWebNG.Repo.update_all( + from(ut in Accounts.UserToken, where: ut.token == ^token), + set: [inserted_at: dt, authenticated_at: dt] + ) + end +end diff --git a/web-ng/test/support/generators/edge_onboarding_generators.ex b/web-ng/test/support/generators/edge_onboarding_generators.ex new file mode 100644 index 000000000..c424f6b12 --- /dev/null +++ b/web-ng/test/support/generators/edge_onboarding_generators.ex @@ -0,0 +1,30 @@ +defmodule ServiceRadarWebNG.Generators.EdgeOnboardingGenerators do + @moduledoc false + + import StreamData + + def package_id do + string(:alphanumeric, min_length: 1, max_length: 64) + end + + def download_token do + string(:printable, min_length: 1, max_length: 128) + |> map(&String.trim/1) + |> filter(&(&1 != "")) + end + + def core_api_url do + one_of([ + constant(nil), + 
constant("http://localhost:8090"), + constant("https://example.com"), + string(:alphanumeric, min_length: 1, max_length: 24) + |> map(fn host -> "https://#{host}.test" end) + ]) + end + + def random_token_string(opts \\ []) do + max_length = Keyword.get(opts, :max_length, 400) + string(:printable, max_length: max_length) + end +end diff --git a/web-ng/test/support/generators/property_opts.ex b/web-ng/test/support/generators/property_opts.ex new file mode 100644 index 000000000..b2b8d508f --- /dev/null +++ b/web-ng/test/support/generators/property_opts.ex @@ -0,0 +1,18 @@ +defmodule ServiceRadarWebNG.TestSupport.PropertyOpts do + @moduledoc false + + def max_runs(tag \\ nil) do + default = + case tag do + :slow_property -> 200 + _ -> 50 + end + + System.get_env("PROPERTY_MAX_RUNS", Integer.to_string(default)) + |> Integer.parse() + |> case do + {value, ""} when value > 0 -> value + _ -> default + end + end +end diff --git a/web-ng/test/support/generators/srql_generators.ex b/web-ng/test/support/generators/srql_generators.ex new file mode 100644 index 000000000..8b440bf16 --- /dev/null +++ b/web-ng/test/support/generators/srql_generators.ex @@ -0,0 +1,42 @@ +defmodule ServiceRadarWebNG.Generators.SRQLGenerators do + @moduledoc false + + import StreamData + + def printable_query_string(opts \\ []) do + max_length = Keyword.get(opts, :max_length, 200) + string(:printable, max_length: max_length) + end + + def json_key(opts \\ []) do + min_length = Keyword.get(opts, :min_length, 1) + max_length = Keyword.get(opts, :max_length, 24) + + string(:alphanumeric, min_length: min_length, max_length: max_length) + end + + def json_value do + one_of([ + string(:printable, max_length: 200), + integer(), + boolean(), + constant(nil) + ]) + end + + def json_map(opts \\ []) do + max_length = Keyword.get(opts, :max_length, 12) + map_of(json_key(), json_value(), max_length: max_length) + end + + def untrusted_param_value do + one_of([ + string(:printable, max_length: 200), + integer(), + boolean(), + constant(nil), + list_of(string(:printable, max_length: 40), max_length: 5), + map_of(json_key(max_length: 12), string(:printable, max_length: 60), max_length: 6) + ]) + end +end diff --git a/web-ng/test/support/srql_stub.ex b/web-ng/test/support/srql_stub.ex new file mode 100644 index 000000000..c25da78e2 --- /dev/null +++ b/web-ng/test/support/srql_stub.ex @@ -0,0 +1,22 @@ +defmodule ServiceRadarWebNG.TestSupport.SRQLStub do + @moduledoc false + + @behaviour ServiceRadarWebNG.SRQLBehaviour + + def query(query) when is_binary(query) do + {:ok, %{"results" => [], "pagination" => %{}, "error" => nil}} + end + + def query(_query) do + {:error, :invalid_query} + end + + @impl true + def query_request(%{"query" => query}) when is_binary(query) do + {:ok, %{"results" => [], "pagination" => %{}, "error" => nil}} + end + + def query_request(_payload) do + {:error, :invalid_request} + end +end diff --git a/web-ng/test/test_helper.exs b/web-ng/test/test_helper.exs new file mode 100644 index 000000000..be7966164 --- /dev/null +++ b/web-ng/test/test_helper.exs @@ -0,0 +1,85 @@ +ExUnit.start() + +{:ok, _} = Application.ensure_all_started(:serviceradar_web_ng) + +repo = ServiceRadarWebNG.Repo + +_ = + Ecto.Adapters.SQL.query!( + repo, + """ + CREATE TABLE IF NOT EXISTS unified_devices ( + device_id text PRIMARY KEY + ) + """, + [] + ) + +_ = + Enum.each( + [ + {"ip", "text"}, + {"poller_id", "text"}, + {"agent_id", "text"}, + {"hostname", "text"}, + {"mac", "text"}, + {"discovery_sources", "text[]"}, + {"is_available", 
"boolean"}, + {"first_seen", "timestamptz"}, + {"last_seen", "timestamptz"}, + {"metadata", "jsonb"}, + {"device_type", "text"}, + {"service_type", "text"}, + {"service_status", "text"}, + {"last_heartbeat", "timestamptz"}, + {"os_info", "text"}, + {"version_info", "text"}, + {"updated_at", "timestamptz"} + ], + fn {col, type} -> + Ecto.Adapters.SQL.query!( + repo, + "ALTER TABLE unified_devices ADD COLUMN IF NOT EXISTS #{col} #{type}", + [] + ) + end + ) + +_ = + Ecto.Adapters.SQL.query!( + repo, + """ + CREATE TABLE IF NOT EXISTS pollers ( + poller_id text PRIMARY KEY + ) + """, + [] + ) + +_ = + Enum.each( + [ + {"component_id", "text"}, + {"registration_source", "text"}, + {"status", "text"}, + {"spiffe_identity", "text"}, + {"first_registered", "timestamptz"}, + {"first_seen", "timestamptz"}, + {"last_seen", "timestamptz"}, + {"metadata", "jsonb"}, + {"created_by", "text"}, + {"is_healthy", "boolean"}, + {"agent_count", "integer"}, + {"checker_count", "integer"}, + {"updated_at", "timestamptz"} + ], + fn {col, type} -> + Ecto.Adapters.SQL.query!( + repo, + "ALTER TABLE pollers ADD COLUMN IF NOT EXISTS #{col} #{type}", + [] + ) + end + ) + +Ecto.Adapters.SQL.Sandbox.mode(ServiceRadarWebNG.Repo, :manual)