# .env.example — 79 lines (68 loc) · 3.38 KB
# Forked from vectorize-io/hindsight
# Hindsight Environment Variables
# Copy this file to .env and fill in your values
# LLM Configuration (Required)
# Supported providers: openai, groq, ollama, gemini, anthropic, lmstudio, vertexai
HINDSIGHT_API_LLM_PROVIDER=openai
HINDSIGHT_API_LLM_API_KEY=your-api-key-here
HINDSIGHT_API_LLM_MODEL=gpt-4o-mini
HINDSIGHT_API_LLM_BASE_URL=https://api.openai.com/v1
# Example: Anthropic Claude configuration
# HINDSIGHT_API_LLM_PROVIDER=anthropic
# HINDSIGHT_API_LLM_API_KEY=your-anthropic-api-key
# HINDSIGHT_API_LLM_MODEL=claude-sonnet-4-20250514
# Example: Google Vertex AI configuration
# HINDSIGHT_API_LLM_PROVIDER=vertexai
# HINDSIGHT_API_LLM_MODEL=google/gemini-2.0-flash-001
# HINDSIGHT_API_LLM_VERTEXAI_PROJECT_ID=your-gcp-project-id
# HINDSIGHT_API_LLM_VERTEXAI_REGION=us-central1
# HINDSIGHT_API_LLM_VERTEXAI_SERVICE_ACCOUNT_KEY=/path/to/service-account-key.json # Optional, uses ADC if not set
# Example: LM Studio local configuration (Qwen 2.5 32B recommended)
# HINDSIGHT_API_LLM_PROVIDER=lmstudio
# HINDSIGHT_API_LLM_API_KEY=lmstudio
# HINDSIGHT_API_LLM_BASE_URL=http://localhost:1234/v1
# HINDSIGHT_API_LLM_MODEL=qwen2.5-32b-instruct
# API Configuration (Optional)
HINDSIGHT_API_HOST=0.0.0.0
HINDSIGHT_API_PORT=8888
HINDSIGHT_API_LOG_LEVEL=info
# Base Path / Reverse Proxy Support (Optional)
# Set these when deploying behind a reverse proxy with path-based routing
# Example: To deploy at example.com/hindsight/, set both to "/hindsight"
# HINDSIGHT_API_BASE_PATH=/hindsight
# NEXT_PUBLIC_BASE_PATH=/hindsight
# Database (Optional - uses embedded pg0 by default)
# HINDSIGHT_API_DATABASE_URL=postgresql://user:pass@host:5432/db
# HINDSIGHT_API_DATABASE_SCHEMA=public # PostgreSQL schema name (default: public)
# Vector Extension (Optional - uses pgvector by default)
# Options: "pgvector" (default), "vchord", "pgvectorscale" (DiskANN)
# HINDSIGHT_API_VECTOR_EXTENSION=pgvector
# For Azure PostgreSQL with DiskANN:
# HINDSIGHT_API_VECTOR_EXTENSION=pgvectorscale # Auto-detects pg_diskann on Azure
# Embeddings Configuration (Optional - uses local by default)
# Provider: "local" (default) or "tei" (HuggingFace Text Embeddings Inference)
# HINDSIGHT_API_EMBEDDINGS_PROVIDER=local
# For local provider:
# HINDSIGHT_API_EMBEDDINGS_LOCAL_MODEL=BAAI/bge-small-en-v1.5
# For TEI provider:
# HINDSIGHT_API_EMBEDDINGS_TEI_URL=http://localhost:8080
# Reranker Configuration (Optional - uses local by default)
# Provider: "local" (default) or "tei" (HuggingFace Text Embeddings Inference)
# HINDSIGHT_API_RERANKER_PROVIDER=local
# For local provider:
# HINDSIGHT_API_RERANKER_LOCAL_MODEL=cross-encoder/ms-marco-MiniLM-L-6-v2
# For TEI provider:
# HINDSIGHT_API_RERANKER_TEI_URL=http://localhost:8081
# Observability & Tracing (Optional - disabled by default)
# Enable OpenTelemetry tracing for LLM calls (GenAI semantic conventions)
# HINDSIGHT_API_OTEL_TRACES_ENABLED=true
#
# Local development with Grafana LGTM stack (recommended - see scripts/dev/grafana/README.md)
# HINDSIGHT_API_OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
#
# Cloud backends (Grafana Cloud, Langfuse, DataDog, etc.)
# HINDSIGHT_API_OTEL_EXPORTER_OTLP_ENDPOINT=https://your-backend-url
# HINDSIGHT_API_OTEL_EXPORTER_OTLP_HEADERS="Authorization=Bearer your-token"
#
# Custom service name and environment (optional, defaults: hindsight-api, development)
# HINDSIGHT_API_OTEL_SERVICE_NAME=hindsight-production
# HINDSIGHT_API_OTEL_DEPLOYMENT_ENVIRONMENT=production