Skip to content

Commit d913926

Browse files
AFDudley and claude
committed
feat(k8s): namespace-per-deployment for resource isolation and cleanup
Each deployment now gets its own Kubernetes namespace (laconic-{deployment_id}). This provides:

- Resource isolation between deployments on the same cluster
- Simplified cleanup: deleting the namespace cascades to all namespaced resources
- No orphaned resources possible when deployment IDs change

Changes:

- Set k8s_namespace based on deployment name in __init__
- Add _ensure_namespace() to create the namespace before deploying resources
- Add _delete_namespace() for cleanup
- Simplify down() to just delete PVs (cluster-scoped) and the namespace
- Fix hardcoded "default" namespace in the logs function

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent b41e0cb commit d913926

2 files changed

Lines changed: 52 additions & 103 deletions

File tree

stack_orchestrator/deploy/k8s/deploy_k8s.py

Lines changed: 52 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ class K8sDeployer(Deployer):
9696
core_api: client.CoreV1Api
9797
apps_api: client.AppsV1Api
9898
networking_api: client.NetworkingV1Api
99-
k8s_namespace: str = "default"
99+
k8s_namespace: str
100100
kind_cluster_name: str
101101
skip_cluster_management: bool
102102
cluster_info: ClusterInfo
@@ -113,13 +113,16 @@ def __init__(
113113
) -> None:
114114
self.type = type
115115
self.skip_cluster_management = False
116+
self.k8s_namespace = "default" # Will be overridden below if context exists
116117
# TODO: workaround pending refactoring above to cope with being
117118
# created with a null deployment_context
118119
if deployment_context is None:
119120
return
120121
self.deployment_dir = deployment_context.deployment_dir
121122
self.deployment_context = deployment_context
122123
self.kind_cluster_name = compose_project_name
124+
# Use deployment-specific namespace for resource isolation and easy cleanup
125+
self.k8s_namespace = f"laconic-{compose_project_name}"
123126
self.cluster_info = ClusterInfo()
124127
self.cluster_info.int(
125128
compose_files,
@@ -149,6 +152,46 @@ def connect_api(self):
149152
self.apps_api = client.AppsV1Api()
150153
self.custom_obj_api = client.CustomObjectsApi()
151154

155+
def _ensure_namespace(self):
156+
"""Create the deployment namespace if it doesn't exist."""
157+
if opts.o.dry_run:
158+
print(f"Dry run: would create namespace {self.k8s_namespace}")
159+
return
160+
try:
161+
self.core_api.read_namespace(name=self.k8s_namespace)
162+
if opts.o.debug:
163+
print(f"Namespace {self.k8s_namespace} already exists")
164+
except ApiException as e:
165+
if e.status == 404:
166+
# Create the namespace
167+
ns = client.V1Namespace(
168+
metadata=client.V1ObjectMeta(
169+
name=self.k8s_namespace,
170+
labels={"app": self.cluster_info.app_name},
171+
)
172+
)
173+
self.core_api.create_namespace(body=ns)
174+
if opts.o.debug:
175+
print(f"Created namespace {self.k8s_namespace}")
176+
else:
177+
raise
178+
179+
def _delete_namespace(self):
180+
"""Delete the deployment namespace and all resources within it."""
181+
if opts.o.dry_run:
182+
print(f"Dry run: would delete namespace {self.k8s_namespace}")
183+
return
184+
try:
185+
self.core_api.delete_namespace(name=self.k8s_namespace)
186+
if opts.o.debug:
187+
print(f"Deleted namespace {self.k8s_namespace}")
188+
except ApiException as e:
189+
if e.status == 404:
190+
if opts.o.debug:
191+
print(f"Namespace {self.k8s_namespace} not found")
192+
else:
193+
raise
194+
152195
def _create_volume_data(self):
153196
# Create the host-path-mounted PVs for this deployment
154197
pvs = self.cluster_info.get_pvs()
@@ -314,6 +357,8 @@ def up(self, detach, skip_cluster_management, services):
314357
load_images_into_kind(self.kind_cluster_name, local_images)
315358
# Note: if no local containers defined, all images come from registries
316359
self.connect_api()
360+
# Create deployment-specific namespace for resource isolation
361+
self._ensure_namespace()
317362
if self.is_kind() and not self.skip_cluster_management:
318363
# Configure ingress controller (not installed by default in kind)
319364
# Skip if already running (idempotent for shared cluster)
@@ -381,17 +426,12 @@ def up(self, detach, skip_cluster_management, services):
381426
print("NodePort created:")
382427
print(f"{nodeport_resp}")
383428

384-
def down(self, timeout, volumes, skip_cluster_management): # noqa: C901
429+
def down(self, timeout, volumes, skip_cluster_management):
385430
self.skip_cluster_management = skip_cluster_management
386431
self.connect_api()
387432

388-
# Query K8s for resources by label selector instead of generating names
389-
# from config. This ensures we clean up orphaned resources when deployment
390-
# IDs change (e.g., after force_redeploy).
391-
label_selector = f"app={self.cluster_info.app_name}"
392-
433+
# PersistentVolumes are cluster-scoped (not namespaced), so delete by label
393434
if volumes:
394-
# Delete PVs for this deployment (PVs use volume-label pattern)
395435
try:
396436
pvs = self.core_api.list_persistent_volume(
397437
label_selector=f"app={self.cluster_info.app_name}"
@@ -407,97 +447,9 @@ def down(self, timeout, volumes, skip_cluster_management): # noqa: C901
407447
if opts.o.debug:
408448
print(f"Error listing PVs: {e}")
409449

410-
# Delete PVCs for this deployment
411-
try:
412-
pvcs = self.core_api.list_namespaced_persistent_volume_claim(
413-
namespace=self.k8s_namespace, label_selector=label_selector
414-
)
415-
for pvc in pvcs.items:
416-
if opts.o.debug:
417-
print(f"Deleting PVC: {pvc.metadata.name}")
418-
try:
419-
self.core_api.delete_namespaced_persistent_volume_claim(
420-
name=pvc.metadata.name, namespace=self.k8s_namespace
421-
)
422-
except ApiException as e:
423-
_check_delete_exception(e)
424-
except ApiException as e:
425-
if opts.o.debug:
426-
print(f"Error listing PVCs: {e}")
427-
428-
# Delete ConfigMaps for this deployment
429-
try:
430-
cfg_maps = self.core_api.list_namespaced_config_map(
431-
namespace=self.k8s_namespace, label_selector=label_selector
432-
)
433-
for cfg_map in cfg_maps.items:
434-
if opts.o.debug:
435-
print(f"Deleting ConfigMap: {cfg_map.metadata.name}")
436-
try:
437-
self.core_api.delete_namespaced_config_map(
438-
name=cfg_map.metadata.name, namespace=self.k8s_namespace
439-
)
440-
except ApiException as e:
441-
_check_delete_exception(e)
442-
except ApiException as e:
443-
if opts.o.debug:
444-
print(f"Error listing ConfigMaps: {e}")
445-
446-
# Delete Deployments for this deployment
447-
try:
448-
deployments = self.apps_api.list_namespaced_deployment(
449-
namespace=self.k8s_namespace, label_selector=label_selector
450-
)
451-
for deployment in deployments.items:
452-
if opts.o.debug:
453-
print(f"Deleting Deployment: {deployment.metadata.name}")
454-
try:
455-
self.apps_api.delete_namespaced_deployment(
456-
name=deployment.metadata.name, namespace=self.k8s_namespace
457-
)
458-
except ApiException as e:
459-
_check_delete_exception(e)
460-
except ApiException as e:
461-
if opts.o.debug:
462-
print(f"Error listing Deployments: {e}")
463-
464-
# Delete Services for this deployment (includes both ClusterIP and NodePort)
465-
try:
466-
services = self.core_api.list_namespaced_service(
467-
namespace=self.k8s_namespace, label_selector=label_selector
468-
)
469-
for service in services.items:
470-
if opts.o.debug:
471-
print(f"Deleting Service: {service.metadata.name}")
472-
try:
473-
self.core_api.delete_namespaced_service(
474-
namespace=self.k8s_namespace, name=service.metadata.name
475-
)
476-
except ApiException as e:
477-
_check_delete_exception(e)
478-
except ApiException as e:
479-
if opts.o.debug:
480-
print(f"Error listing Services: {e}")
481-
482-
# Delete Ingresses for this deployment
483-
try:
484-
ingresses = self.networking_api.list_namespaced_ingress(
485-
namespace=self.k8s_namespace, label_selector=label_selector
486-
)
487-
for ingress in ingresses.items:
488-
if opts.o.debug:
489-
print(f"Deleting Ingress: {ingress.metadata.name}")
490-
try:
491-
self.networking_api.delete_namespaced_ingress(
492-
name=ingress.metadata.name, namespace=self.k8s_namespace
493-
)
494-
except ApiException as e:
495-
_check_delete_exception(e)
496-
if not ingresses.items and opts.o.debug:
497-
print("No ingress to delete")
498-
except ApiException as e:
499-
if opts.o.debug:
500-
print(f"Error listing Ingresses: {e}")
450+
# Delete the deployment namespace - this cascades to all namespaced resources
451+
# (PVCs, ConfigMaps, Deployments, Services, Ingresses, etc.)
452+
self._delete_namespace()
501453

502454
if self.is_kind() and not self.skip_cluster_management:
503455
# Destroy the kind cluster
@@ -635,7 +587,7 @@ def logs(self, services, tail, follow, stream):
635587
log_data = ""
636588
for container in containers:
637589
container_log = self.core_api.read_namespaced_pod_log(
638-
k8s_pod_name, namespace="default", container=container
590+
k8s_pod_name, namespace=self.k8s_namespace, container=container
639591
)
640592
container_log_lines = container_log.splitlines()
641593
for line in container_log_lines:

stack_orchestrator/deploy/spec.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -128,9 +128,6 @@ def get_volume_resources(self):
128128
def get_http_proxy(self):
129129
return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])
130130

131-
def get_acme_email(self):
132-
return self.obj.get(constants.network_key, {}).get("acme-email", "")
133-
134131
def get_annotations(self):
135132
return self.obj.get(constants.annotations_key, {})
136133

0 commit comments

Comments (0)