-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
91 lines (84 loc) · 3.55 KB
/
.env.example
File metadata and controls
91 lines (84 loc) · 3.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# Google Gemini API key
GEMINI_API_KEY=
# Azure OpenAI (Phase 1: chat + embeddings)
AZURE_OPENAI_ENDPOINT=
AZURE_OPENAI_API_KEY=
OPENAI_API_VERSION=2024-10-21
AZURE_OPENAI_CHAT_DEPLOYMENT=
AZURE_OPENAI_EMBEDDING_DEPLOYMENT=
# Runtime settings profile. Use local-laptop-gemma for Ollama mode or
# local-llama-cpp for llama.cpp mode. The scripts in scripts/use-*.ps1 update this.
S18_PROFILE=local-laptop-gemma
# Docker-safe mode switching (recommended): instead of repeatedly editing this file,
# use the compose env files below:
# .env.docker.ollama
# .env.docker.llama-cpp
# .env.docker.llama-cpp-host
# Ollama HTTP base (no path). When the API/worker runs IN Docker *and* you use the
# Compose `ollama` service, use http://ollama:11434. Use host.docker.internal ONLY
# when Ollama runs on your host OS while the API runs in Docker.
# A wrong URL here means RemMe/agent requests never reach the real server. A 404 on
# /api/chat usually means the model was not pulled on the Ollama instance you are
# hitting (run `ollama pull <model>` there).
OLLAMA_BASE_URL=http://ollama:11434
# Ollama HTTP timeout (seconds). Affects run completion; allow 360+ for slow agent steps.
OLLAMA_TIMEOUT=360
# llama.cpp server URL (OpenAI-compatible API exposed by llama-server)
# If using compose llama_cpp profile, set to http://s18share-llama-cpp:8080
# If using host llama-server, set to http://host.docker.internal:8080
LLAMA_CPP_BASE_URL=http://localhost:8080
# llama.cpp HTTP timeout (seconds)
LLAMA_CPP_TIMEOUT=360
# Run execution backend. Use "in_process" for local/default behavior or "celery" for Redis-backed workers.
S18_RUN_EXECUTOR=in_process
# Celery broker/result backend. Local: redis://localhost:6379/0; Docker/K8s: redis://redis:6379/0
CELERY_BROKER_URL=redis://localhost:6379/0
CELERY_RESULT_BACKEND=redis://localhost:6379/0
S18_CELERY_PREFETCH_MULTIPLIER=1
# Supabase auth + backend verification
AUTH_ENABLED=false
# Docker compose uses this to avoid collisions with other repos' AUTH_ENABLED.
S18_AUTH_ENABLED=false
SUPABASE_URL=
SUPABASE_ANON_KEY=
SUPABASE_JWT_AUDIENCE=authenticated
# Optional backend persistence to Supabase tables
SUPABASE_LOGGING_ENABLED=false
SUPABASE_SERVICE_ROLE_KEY=
# --- Power Apps -> Cloud RAG ingest pipeline ---
# Object storage (choose ONE per tenant via config/integrations/powerapps_*.json)
# Azure Blob
AZURE_STORAGE_ACCOUNT=
AZURE_STORAGE_CONTAINER=
AZURE_STORAGE_SAS_TOKEN=
AZURE_STORAGE_CONNECTION_STRING=
# AWS S3
AWS_REGION=us-east-1
S3_BUCKET=
S3_KMS_KEY_ID=
# Vector store (choose ONE per tenant)
# Azure AI Search
AZURE_SEARCH_ENDPOINT=
AZURE_SEARCH_KEY=
# AWS OpenSearch Serverless / managed
OPENSEARCH_ENDPOINT=
# AWS Bedrock Knowledge Base (optional, fully managed alt to OpenSearch)
BEDROCK_KB_ID=
BEDROCK_KB_DATA_SOURCE_ID=
BEDROCK_EMBEDDING_MODEL_ID=amazon.titan-embed-text-v2:0
# Google Cloud path (GCS + Vertex AI embeddings + Vertex Vector Search)
GOOGLE_CLOUD_PROJECT=
GOOGLE_APPLICATION_CREDENTIALS=
GCS_BUCKET=
VERTEX_AI_LOCATION=us-central1
VERTEX_AI_EMBEDDING_MODEL=text-embedding-005
VERTEX_AI_EMBEDDING_DIMENSION=768
VERTEX_AI_INDEX_ID=
VERTEX_AI_INDEX_ENDPOINT_ID=
VERTEX_AI_DEPLOYED_INDEX_ID=
# EHR Data Miner (mockehr) upstream provider integration
# Preferred, provider-neutral variable. When set, mockehr fetches /patients/{id} and /patients/{id}/labs from the upstream provider.
# Example: http://backend:8000 (Docker) or http://localhost:8000 (host)
EXTERNAL_MOCKEHR_BASE_URL=
# Backward-compatible alias for existing wise-ai setups. Used only when EXTERNAL_MOCKEHR_BASE_URL is empty.
WISE_MOCKEHR_BASE_URL=
# Optional response source label for observability fields returned by mockehr.
MOCKEHR_PRIMARY_SOURCE_LABEL=external_mockehr