diff --git a/README.md b/README.md index 12896128..2602aabf 100644 --- a/README.md +++ b/README.md @@ -43,9 +43,37 @@ Now you can use one of following storageClasses: * `csi-driver-lvm-linear` * `csi-driver-lvm-mirror` * `csi-driver-lvm-striped` +* `csi-driver-lvm-linear-encrypted` +* `csi-driver-lvm-mirror-encrypted` +* `csi-driver-lvm-striped-encrypted` To get the previous old and now deprecated `csi-lvm-sc-linear`, ... storageclasses, set helm-chart value `compat03x=true`. +## Encryption ## + +csi-driver-lvm supports LUKS2 encryption for volumes at rest. When encryption is enabled, the LVM logical volume is formatted with LUKS2 and a dm-crypt mapper device is used transparently for all I/O. + +### Setup ### + +1. Create a Kubernetes Secret containing the LUKS passphrase: + +```bash +kubectl create secret generic csi-lvm-encryption-secret \ + --from-literal=passphrase='my-secret-passphrase' +``` + +2. Create PVCs using one of the encrypted StorageClasses. The encryption is handled transparently by the driver. + +### How it works ### + +- **NodeStageVolume**: LUKS-formats the LV (first use only), then opens it via `cryptsetup luksOpen`, creating a `/dev/mapper/csi-lvm-` device +- **NodePublishVolume**: Mounts the mapper device (instead of the raw LV) to the target path +- **NodeUnpublishVolume**: Unmounts as usual +- **NodeUnstageVolume**: Closes the LUKS device via `cryptsetup luksClose` +- **Volume expansion**: The LV is extended first, then the LUKS layer is resized, then the filesystem + +Both filesystem and raw block access types are supported with encryption. + ## Migration ## If you want to migrate your existing PVC to / from csi-driver-lvm, you can use [korb](https://github.com/BeryJu/korb). 
diff --git a/charts/csi-driver-lvm/templates/daemonset.yaml b/charts/csi-driver-lvm/templates/daemonset.yaml index fc5b310f..bbc8b2aa 100644 --- a/charts/csi-driver-lvm/templates/daemonset.yaml +++ b/charts/csi-driver-lvm/templates/daemonset.yaml @@ -308,6 +308,8 @@ spec: - mountPath: /run/lock/lvm name: lvmlock mountPropagation: Bidirectional + - mountPath: /run/cryptsetup + name: cryptsetup-run - name: liveness-probe args: - --csi-address=/csi/csi.sock @@ -367,4 +369,7 @@ spec: path: {{ .Values.lvm.hostWritePath }}/lock type: DirectoryOrCreate name: lvmlock + - emptyDir: + medium: Memory + name: cryptsetup-run --- diff --git a/charts/csi-driver-lvm/templates/storageclasses.yaml b/charts/csi-driver-lvm/templates/storageclasses.yaml index 177c3919..c04b5cb0 100644 --- a/charts/csi-driver-lvm/templates/storageclasses.yaml +++ b/charts/csi-driver-lvm/templates/storageclasses.yaml @@ -61,3 +61,75 @@ allowVolumeExpansion: true parameters: type: "striped" {{ end }} +--- +{{- $storageClass := .Values.storageClasses.linearEncrypted -}} +{{ if $storageClass.enabled }} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ .Values.lvm.storageClassStub }}-linear-encrypted +{{- if not (empty $storageClass.additionalAnnotations) }} + annotations: + {{- $storageClass.additionalAnnotations | toYaml | nindent 4 -}} +{{ end }} + labels: + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +provisioner: {{ .Values.lvm.driverName }} +reclaimPolicy: {{ $storageClass.reclaimPolicy }} +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +parameters: + type: "linear" + encryption: "true" + csi.storage.k8s.io/node-stage-secret-name: {{ $storageClass.encryptionSecret.name }} + csi.storage.k8s.io/node-stage-secret-namespace: {{ $storageClass.encryptionSecret.namespace }} +{{ end }} +--- +{{- $storageClass := .Values.storageClasses.mirrorEncrypted -}} +{{ if $storageClass.enabled }} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: 
+ name: {{ .Values.lvm.storageClassStub }}-mirror-encrypted +{{- if not (empty $storageClass.additionalAnnotations) }} + annotations: + {{- $storageClass.additionalAnnotations | toYaml | nindent 4 -}} +{{ end }} + labels: + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +provisioner: {{ .Values.lvm.driverName }} +reclaimPolicy: {{ $storageClass.reclaimPolicy }} +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +parameters: + type: "mirror" + encryption: "true" + csi.storage.k8s.io/node-stage-secret-name: {{ $storageClass.encryptionSecret.name }} + csi.storage.k8s.io/node-stage-secret-namespace: {{ $storageClass.encryptionSecret.namespace }} +{{ end }} +--- +{{- $storageClass := .Values.storageClasses.stripedEncrypted -}} +{{ if $storageClass.enabled }} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ .Values.lvm.storageClassStub }}-striped-encrypted +{{- if not (empty $storageClass.additionalAnnotations) }} + annotations: + {{- $storageClass.additionalAnnotations | toYaml | nindent 4 -}} +{{ end }} + labels: + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +provisioner: {{ .Values.lvm.driverName }} +reclaimPolicy: {{ $storageClass.reclaimPolicy }} +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +parameters: + type: "striped" + encryption: "true" + csi.storage.k8s.io/node-stage-secret-name: {{ $storageClass.encryptionSecret.name }} + csi.storage.k8s.io/node-stage-secret-namespace: {{ $storageClass.encryptionSecret.namespace }} +{{ end }} diff --git a/charts/csi-driver-lvm/values.yaml b/charts/csi-driver-lvm/values.yaml index 765dadb5..70063871 100644 --- a/charts/csi-driver-lvm/values.yaml +++ b/charts/csi-driver-lvm/values.yaml @@ -56,6 +56,27 @@ storageClasses: enabled: true additionalAnnotations: [] reclaimPolicy: Delete + linearEncrypted: + enabled: true + additionalAnnotations: [] + reclaimPolicy: Delete + encryptionSecret: + name: csi-lvm-encryption-secret + 
namespace: default + stripedEncrypted: + enabled: true + additionalAnnotations: [] + reclaimPolicy: Delete + encryptionSecret: + name: csi-lvm-encryption-secret + namespace: default + mirrorEncrypted: + enabled: true + additionalAnnotations: [] + reclaimPolicy: Delete + encryptionSecret: + name: csi-lvm-encryption-secret + namespace: default nodeSelector: # The plugin daemonset will run on all nodes if it has a toleration, diff --git a/cmd/lvmplugin/Dockerfile b/cmd/lvmplugin/Dockerfile index cf227478..01671b11 100644 --- a/cmd/lvmplugin/Dockerfile +++ b/cmd/lvmplugin/Dockerfile @@ -2,7 +2,7 @@ FROM alpine:3.22 ARG TARGETPLATFORM LABEL maintainer="metal-stack authors " -RUN apk add lvm2 lvm2-extra e2fsprogs e2fsprogs-extra smartmontools nvme-cli util-linux device-mapper xfsprogs xfsprogs-extra +RUN apk add lvm2 lvm2-extra e2fsprogs e2fsprogs-extra smartmontools nvme-cli util-linux device-mapper xfsprogs xfsprogs-extra cryptsetup COPY --chmod=755 bin/${TARGETPLATFORM}/lvmplugin /lvmplugin USER root ENTRYPOINT ["/lvmplugin"] diff --git a/examples/csi-encryption-secret.yaml b/examples/csi-encryption-secret.yaml new file mode 100644 index 00000000..45f5c91b --- /dev/null +++ b/examples/csi-encryption-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: csi-lvm-encryption-secret + namespace: default +type: Opaque +stringData: + passphrase: "my-secret-passphrase" diff --git a/examples/csi-storageclass-linear-encrypted.yaml b/examples/csi-storageclass-linear-encrypted.yaml new file mode 100644 index 00000000..5552a947 --- /dev/null +++ b/examples/csi-storageclass-linear-encrypted.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-lvm-sc-linear-encrypted +provisioner: lvm.csi.metal-stack.io +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +parameters: + type: "linear" + encryption: "true" + csi.storage.k8s.io/node-stage-secret-name: csi-lvm-encryption-secret + 
csi.storage.k8s.io/node-stage-secret-namespace: default diff --git a/examples/csi-storageclass-mirror-encrypted.yaml b/examples/csi-storageclass-mirror-encrypted.yaml new file mode 100644 index 00000000..b9d148db --- /dev/null +++ b/examples/csi-storageclass-mirror-encrypted.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-lvm-sc-mirror-encrypted +provisioner: lvm.csi.metal-stack.io +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +parameters: + type: "mirror" + encryption: "true" + csi.storage.k8s.io/node-stage-secret-name: csi-lvm-encryption-secret + csi.storage.k8s.io/node-stage-secret-namespace: default diff --git a/examples/csi-storageclass-striped-encrypted.yaml b/examples/csi-storageclass-striped-encrypted.yaml new file mode 100644 index 00000000..4614e60f --- /dev/null +++ b/examples/csi-storageclass-striped-encrypted.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-lvm-sc-striped-encrypted +provisioner: lvm.csi.metal-stack.io +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +parameters: + type: "striped" + encryption: "true" + csi.storage.k8s.io/node-stage-secret-name: csi-lvm-encryption-secret + csi.storage.k8s.io/node-stage-secret-namespace: default diff --git a/go.mod b/go.mod index 86854b41..d9f89821 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/metal-stack/csi-driver-lvm -go 1.25 +go 1.25.0 require ( github.com/container-storage-interface/spec v1.11.0 @@ -8,7 +8,7 @@ require ( github.com/go-logr/logr v1.4.3 github.com/metal-stack/v v1.0.3 golang.org/x/net v0.43.0 - golang.org/x/sys v0.35.0 + golang.org/x/sys v0.38.0 google.golang.org/grpc v1.75.0 google.golang.org/protobuf v1.36.8 k8s.io/api v0.34.0 @@ -38,6 +38,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.9.0 
// indirect + github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -65,6 +66,7 @@ require ( k8s.io/apiextensions-apiserver v0.33.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/mount-utils v0.35.0 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect diff --git a/go.sum b/go.sum index 5e0349a5..048b9834 100644 --- a/go.sum +++ b/go.sum @@ -71,6 +71,8 @@ github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4 github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/metal-stack/v v1.0.3 h1:Sh2oBlnxrCUD+mVpzfC8HiqL045YWkxs0gpTvkjppqs= github.com/metal-stack/v v1.0.3/go.mod h1:YTahEu7/ishwpYKnp/VaW/7nf8+PInogkfGwLcGPdXg= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -98,6 +100,7 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -106,6 +109,7 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -155,6 +159,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -204,6 +210,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod 
h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/mount-utils v0.35.0 h1:UDE8RDeqmQh1u/yRd+GZC2EpDibiyAfmMEsm43lKNQI= +k8s.io/mount-utils v0.35.0/go.mod h1:ppC4d+mUpfbAJr/V2E8vvxeCEckNM+S5b0kQBQjd3Pw= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= diff --git a/pkg/lvm/luks.go b/pkg/lvm/luks.go new file mode 100644 index 00000000..d87516a7 --- /dev/null +++ b/pkg/lvm/luks.go @@ -0,0 +1,183 @@ +package lvm + +import ( + "errors" + "fmt" + "log/slog" + "os" + "os/exec" + "path" + "strings" +) + +const ( + cryptsetupCmd = "cryptsetup" + diskMapperPath = "/dev/mapper/" + defaultLuksHash = "sha256" + defaultLuksCipher = "aes-xts-plain64" + // XTS splits the key in half, so 512 bits is required for AES-256-XTS + // (256 would silently give AES-128-XTS); 512 also matches the LUKS2 default. + defaultLuksKeySize = "512" + luksMapperPrefix = "csi-lvm-" +) + +// LuksFormat formats the device with LUKS2 encryption using the given passphrase. +func LuksFormat(log *slog.Logger, devicePath, passphrase string) error { + args := []string{ + "-q", + "--type=luks2", + "--hash", defaultLuksHash, + "--cipher", defaultLuksCipher, + "--key-size", defaultLuksKeySize, + "--key-file", os.Stdin.Name(), + "--pbkdf-memory=65535", + "luksFormat", devicePath, + } + + log.Info("formatting device with LUKS", "device", devicePath) + + cmd := exec.Command(cryptsetupCmd, args...) + cmd.Stdin = strings.NewReader(passphrase) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("unable to luksFormat device %s: %w (%s)", devicePath, err, string(out)) + } + + return nil +} + +// LuksOpen opens a LUKS device and maps it to the given mapper name. 
+func LuksOpen(log *slog.Logger, devicePath, mapperName, passphrase string) error { + args := []string{ + "luksOpen", devicePath, mapperName, + "--disable-keyring", // LUKS2 volumes require passphrase on resize if keyring is not disabled on open + "--key-file", "/dev/stdin", + "--perf-same_cpu_crypt", + "--perf-submit_from_crypt_cpus", + "--perf-no_read_workqueue", + "--perf-no_write_workqueue", + } + + log.Info("opening LUKS device", "device", devicePath, "mapper", mapperName) + + cmd := exec.Command(cryptsetupCmd, args...) + cmd.Stdin = strings.NewReader(passphrase) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("unable to luksOpen device %s: %w (%s)", devicePath, err, string(out)) + } + + return nil +} + +// LuksClose closes the LUKS device with the given mapper name. +func LuksClose(log *slog.Logger, mapperName string) error { + mapperPath := diskMapperPath + mapperName + + log.Info("closing LUKS device", "mapper", mapperPath) + + cmd := exec.Command(cryptsetupCmd, "luksClose", mapperPath) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("unable to luksClose %s: %w (%s)", mapperPath, err, string(out)) + } + + return nil +} + +// LuksResize resizes the LUKS device with the given mapper name. +func LuksResize(log *slog.Logger, mapperName string) error { + mapperPath := diskMapperPath + mapperName + + log.Info("resizing LUKS device", "mapper", mapperPath) + + cmd := exec.Command(cryptsetupCmd, "resize", mapperPath) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("unable to resize LUKS device %s: %w (%s)", mapperPath, err, string(out)) + } + + return nil +} + +// LuksStatus returns true if the LUKS device with the given mapper name is active. 
+func LuksStatus(log *slog.Logger, mapperName string) bool { + mapperPath := path.Join(diskMapperPath, mapperName) + + cmd := exec.Command(cryptsetupCmd, "status", mapperPath) + out, err := cmd.CombinedOutput() + if err != nil { + log.Debug("LUKS status check failed", "mapper", mapperPath, "error", err, "output", string(out)) + return false + } + + return strings.Contains(string(out), "is active") +} + +// IsLuks returns true if the device at devicePath is a LUKS-formatted device. +func IsLuks(log *slog.Logger, devicePath string) (bool, error) { + cmd := exec.Command(cryptsetupCmd, "isLuks", devicePath) + out, err := cmd.CombinedOutput() + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + if exitErr.ExitCode() == 1 { + return false, nil + } + } + return false, fmt.Errorf("unable to check if device %s is LUKS: %w (%s)", devicePath, err, string(out)) + } + + return true, nil +} + +// LVDevicePath returns the device path for a logical volume. +func LVDevicePath(log *slog.Logger, vgName, lvName string) (string, error) { + devicePath := fmt.Sprintf("/dev/%s/%s", vgName, lvName) + + if _, err := os.Stat(devicePath); err != nil { + return "", fmt.Errorf("device path %s does not exist for %s/%s: %w", devicePath, vgName, lvName, err) + } + + log.Debug("resolved LV device path", "vg", vgName, "lv", lvName, "path", devicePath) + return devicePath, nil +} + +// EncryptedDevicePath returns the mapper device path if the LUKS device is active, +// or an empty string if it is not active. 
+func EncryptedDevicePath(log *slog.Logger, mapperName string) (string, error) { + mapperPath := path.Join(diskMapperPath, mapperName) + + _, err := os.Stat(mapperPath) + if os.IsNotExist(err) { + return "", nil + } + if err != nil { + return "", fmt.Errorf("unable to stat mapper path %s: %w", mapperPath, err) + } + + if !LuksStatus(log, mapperName) { + return "", nil + } + + return mapperPath, nil +} + +// LuksMapperName returns a deterministic mapper name for the given volume ID. +func LuksMapperName(volumeID string) string { + return luksMapperPrefix + volumeID +} + +// IsAESSupported checks if the CPU supports AES instructions. +func IsAESSupported() bool { + data, err := os.ReadFile("/proc/cpuinfo") + if err != nil { + return false + } + + for _, line := range strings.Split(string(data), "\n") { + if strings.HasPrefix(line, "flags") && strings.Contains(line, " aes") { + return true + } + } + + return false +} diff --git a/pkg/lvm/lvm.go b/pkg/lvm/lvm.go index e64e396d..17c58aac 100644 --- a/pkg/lvm/lvm.go +++ b/pkg/lvm/lvm.go @@ -6,6 +6,7 @@ import ( "log/slog" "os" "os/exec" + "path" "path/filepath" "strconv" "strings" @@ -34,8 +35,11 @@ type lsblk struct { } `json:"blockdevices"` } -func MountLV(log *slog.Logger, lvname, mountPath string, vgName string, fsType string) (string, error) { - lvPath := fmt.Sprintf("/dev/%s/%s", vgName, lvname) +func MountLV(log *slog.Logger, lvname, mountPath string, vgName string, fsType string, devicePath string) (string, error) { + lvPath := devicePath + if lvPath == "" { + lvPath = path.Join("/dev", vgName, lvname) + } formatted := false forceFormat := false @@ -111,8 +115,11 @@ func MountLV(log *slog.Logger, lvname, mountPath string, vgName string, fsType s return "", nil } -func BindMountLV(log *slog.Logger, lvname, mountPath string, vgName string) (string, error) { - lvPath := fmt.Sprintf("/dev/%s/%s", vgName, lvname) +func BindMountLV(log *slog.Logger, lvname, mountPath string, vgName string, devicePath string) (string, 
error) { + lvPath := devicePath + if lvPath == "" { + lvPath = path.Join("/dev", vgName, lvname) + } _, err := os.Create(mountPath) if err != nil { return "", fmt.Errorf("unable to create mount directory for lv:%s err:%w", lvname, err) diff --git a/pkg/server/node.go b/pkg/server/node.go index 207951ed..cab03bf0 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -16,6 +16,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/apimachinery/pkg/api/resource" + mountutils "k8s.io/mount-utils" + utilexec "k8s.io/utils/exec" ) const topologyKeyNode = "topology.lvm.csi/node" @@ -79,21 +81,35 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu d.log.Info("ephemeral mode: created volume", "volume", volID, "size", size) } + // Determine the device path: use encrypted mapper device if available, otherwise raw LV + var ( + volID = req.GetVolumeId() + mapperName = lvm.LuksMapperName(volID) + devicePath = "" + ) + encryptedPath, err := lvm.EncryptedDevicePath(d.log, mapperName) + if err != nil { + return nil, fmt.Errorf("unable to check encrypted device path: %w", err) + } + if encryptedPath != "" { + devicePath = encryptedPath + } + if req.GetVolumeCapability().GetBlock() != nil { - output, err := lvm.BindMountLV(d.log, req.GetVolumeId(), targetPath, d.vgName) + output, err := lvm.BindMountLV(d.log, volID, targetPath, d.vgName, devicePath) if err != nil { return nil, fmt.Errorf("unable to bind mount lv: %w output:%s", err, output) } // FIXME: VolumeCapability is a struct and not the size - d.log.Info("block lv", "id", req.GetVolumeId(), "size", req.GetVolumeCapability(), "vg", d.vgName, "devices", d.devicesPattern, "created at", targetPath) + d.log.Info("block lv", "id", volID, "size", req.GetVolumeCapability(), "vg", d.vgName, "devices", d.devicesPattern, "created at", targetPath) } else if req.GetVolumeCapability().GetMount() != nil { - output, err := lvm.MountLV(d.log, req.GetVolumeId(), targetPath, 
d.vgName, req.GetVolumeCapability().GetMount().GetFsType()) + output, err := lvm.MountLV(d.log, volID, targetPath, d.vgName, req.GetVolumeCapability().GetMount().GetFsType(), devicePath) if err != nil { return nil, fmt.Errorf("unable to mount lv: %w output:%s", err, output) } // FIXME: VolumeCapability is a struct and not the size - d.log.Info("mounted lv", "id", req.GetVolumeId(), "size", req.GetVolumeCapability(), "vg", d.vgName, "devices", d.devicesPattern, "created at", targetPath) + d.log.Info("mounted lv", "id", volID, "size", req.GetVolumeCapability(), "vg", d.vgName, "devices", d.devicesPattern, "created at", targetPath) } return &csi.NodePublishVolumeResponse{}, nil @@ -138,6 +154,43 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe return nil, status.Error(codes.InvalidArgument, "volume Capability missing in request") } + volCtx := req.GetVolumeContext() + if volCtx["encryption"] == "true" { + passphrase, ok := req.GetSecrets()["passphrase"] + if !ok || passphrase == "" { + return nil, status.Error(codes.InvalidArgument, "encryption enabled but no passphrase provided in secrets") + } + + volumeID := req.GetVolumeId() + devicePath, err := lvm.LVDevicePath(d.log, d.vgName, volumeID) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to resolve LV device path: %v", err) + } + mapperName := lvm.LuksMapperName(volumeID) + + // Check if the device is already a LUKS device + isLuks, err := lvm.IsLuks(d.log, devicePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to check if device is LUKS: %v", err) + } + + if !isLuks { + d.log.Info("LUKS formatting device", "device", devicePath) + if err := lvm.LuksFormat(d.log, devicePath, passphrase); err != nil { + return nil, status.Errorf(codes.Internal, "failed to LUKS format device: %v", err) + } + } + + // Open the LUKS device if not already open + if !lvm.LuksStatus(d.log, mapperName) { + if err := lvm.LuksOpen(d.log, devicePath, mapperName, 
passphrase); err != nil { + return nil, status.Errorf(codes.Internal, "failed to open LUKS device: %v", err) + } + } + + d.log.Info("LUKS device staged", "device", devicePath, "mapper", mapperName) + } + return &csi.NodeStageVolumeResponse{}, nil } @@ -150,6 +203,24 @@ func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolu return nil, status.Error(codes.InvalidArgument, "target path missing in request") } + var ( + volumeID = req.GetVolumeId() + mapperName = lvm.LuksMapperName(volumeID) + ) + + // Check if there is an active LUKS device for this volume + encryptedPath, err := lvm.EncryptedDevicePath(d.log, mapperName) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to check encrypted device path: %v", err) + } + + if encryptedPath != "" { + d.log.Info("closing LUKS device", "mapper", mapperName) + if err := lvm.LuksClose(d.log, mapperName); err != nil { + return nil, status.Errorf(codes.Internal, "failed to close LUKS device: %v", err) + } + } + return &csi.NodeUnstageVolumeResponse{}, nil } @@ -253,10 +324,38 @@ func (d *Driver) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolume isBlock = true } - output, err := lvm.ExtendLVS(d.log, d.vgName, volID, uint64(capacity), isBlock) //nolint:gosec + // For encrypted volumes, we need to extend the LV without auto-resizing the filesystem, + // then resize the LUKS layer, and then resize the filesystem separately. 
+ var mapperName = lvm.LuksMapperName(volID) + + encryptedPath, err := lvm.EncryptedDevicePath(d.log, mapperName) if err != nil { - return nil, fmt.Errorf("unable to umount lv: %w output:%s", err, output) + return nil, fmt.Errorf("unable to check encrypted device path: %w", err) + } + if encryptedPath != "" { + // For encrypted volumes: extend LV without filesystem resize, then resize LUKS + output, err := lvm.ExtendLVS(d.log, d.vgName, volID, uint64(capacity), true) //nolint:gosec + if err != nil { + return nil, fmt.Errorf("unable to extend lv: %w output:%s", err, output) + } + + if err := lvm.LuksResize(d.log, mapperName); err != nil { + return nil, fmt.Errorf("unable to resize LUKS device: %w", err) + } + + // For block volumes we're done; for filesystem volumes, resize the filesystem on the mapper device + if !isBlock { + resizer := mountutils.NewResizeFs(utilexec.New()) + if _, err := resizer.Resize(encryptedPath, volPath); err != nil { + return nil, fmt.Errorf("unable to resize filesystem on encrypted device %s: %w", encryptedPath, err) + } + } + } else { + output, err := lvm.ExtendLVS(d.log, d.vgName, volID, uint64(capacity), isBlock) //nolint:gosec + if err != nil { + return nil, fmt.Errorf("unable to extend lv: %w output:%s", err, output) + } } return &csi.NodeExpandVolumeResponse{ diff --git a/tests/bats/test.bats b/tests/bats/test.bats index dbd77f7b..2adc1019 100644 --- a/tests/bats/test.bats +++ b/tests/bats/test.bats @@ -279,7 +279,7 @@ } @test "mirror-integrity pod running" { - run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.mirror-integrity.vol.yaml --timeout=30s + run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.mirror-integrity.vol.yaml --timeout=45s [ "$status" -eq 0 ] } @@ -365,6 +365,159 @@ [ "$status" -eq 0 ] } +## Encryption tests + +@test "create encryption secret" { + run kubectl apply -f files/secret.encryption.yaml --wait --timeout=10s + [ "$status" -eq 0 ] +} + +@test "create storageclass 
linear-encrypted" { + run kubectl apply -f files/storageclass.linear-encrypted.yaml --wait --timeout=10s + [ "$status" -eq 0 ] +} + +@test "create encrypted linear pvc" { + run kubectl apply -f files/pvc.encrypted-linear.yaml --wait --timeout=30s + [ "$status" -eq 0 ] + + run kubectl wait --for=jsonpath='{.status.phase}'=Pending -f files/pvc.encrypted-linear.yaml --timeout=30s + [ "$status" -eq 0 ] +} + +@test "deploy encrypted linear pod" { + run kubectl apply -f files/pod.encrypted-linear.vol.yaml --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "encrypted linear pod running" { + run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.encrypted-linear.vol.yaml --timeout=60s + [ "$status" -eq 0 ] +} + +@test "encrypted linear pvc bound" { + run kubectl wait --for=jsonpath='{.status.phase}'=Bound -f files/pvc.encrypted-linear.yaml --timeout=30s + [ "$status" -eq 0 ] +} + +@test "resize encrypted linear pvc" { + run kubectl apply -f files/pvc.encrypted-linear.resize.yaml --wait --timeout=30s + [ "$status" -eq 0 ] + + # in some cases a pod restart is required + run kubectl replace --force -f files/pod.encrypted-linear.vol.yaml --wait --timeout=50s --grace-period=0 + [ "$status" -eq 0 ] + + run kubectl wait --for=jsonpath='{.status.capacity.storage}'=200Mi -f files/pvc.encrypted-linear.resize.yaml --timeout=30s + [ "$status" -eq 0 ] +} + +@test "delete encrypted linear pod" { + run kubectl delete -f files/pod.encrypted-linear.vol.yaml --grace-period=0 --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "delete resized encrypted linear pvc" { + run kubectl delete -f files/pvc.encrypted-linear.resize.yaml --grace-period=0 --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "create encrypted block pvc" { + run kubectl apply -f files/pvc.encrypted-block.yaml --wait --timeout=30s + [ "$status" -eq 0 ] + + run kubectl wait --for=jsonpath='{.status.phase}'=Pending -f files/pvc.encrypted-block.yaml --timeout=30s + [ "$status" -eq 0 ] +} + +@test 
"deploy encrypted block pod" { + run kubectl apply -f files/pod.encrypted-block.vol.yaml --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "encrypted block pod running" { + run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.encrypted-block.vol.yaml --timeout=60s + [ "$status" -eq 0 ] +} + +@test "encrypted block pvc bound" { + run kubectl wait --for=jsonpath='{.status.phase}'=Bound -f files/pvc.encrypted-block.yaml --timeout=30s + [ "$status" -eq 0 ] +} + +@test "resize encrypted block pvc" { + run kubectl apply -f files/pvc.encrypted-block.resize.yaml --wait --timeout=40s + [ "$status" -eq 0 ] + + # in some cases a pod restart is required + run kubectl replace --force -f files/pod.encrypted-block.vol.yaml --wait --timeout=50s --grace-period=0 + [ "$status" -eq 0 ] + + run kubectl wait --for=jsonpath='{.status.capacity.storage}'=200Mi -f files/pvc.encrypted-block.resize.yaml --timeout=90s + [ "$status" -eq 0 ] +} + +@test "delete encrypted block pod" { + run kubectl delete -f files/pod.encrypted-block.vol.yaml --grace-period=0 --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "delete resized encrypted block pvc" { + run kubectl delete -f files/pvc.encrypted-block.resize.yaml --grace-period=0 --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "write to encrypted volume and ensure data gets written" { + run kubectl apply -f files/pvc.encrypted-linear.yaml --wait --timeout=30s + [ "$status" -eq 0 ] + + run kubectl wait --for=jsonpath='{.status.phase}'=Pending -f files/pvc.encrypted-linear.yaml --timeout=30s + [ "$status" -eq 0 ] + + run kubectl apply -f files/pod.encrypted-remount.vol.writing.yaml --wait --timeout=30s + [ "$status" -eq 0 ] + + run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.encrypted-remount.vol.writing.yaml --timeout=60s + [ "$status" -eq 0 ] + + sleep 2 + + # run must wrap the whole pipeline: `run cmd | grep` executes run in a + # subshell, so $status below would be stale from the previous command + run bash -c 'kubectl exec -t volume-encrypted-writing-test -- cat /remount/output.log | grep "Happily writing encrypted"' + [ "$status" -eq 0 ] +} + 
+@test "remount encrypted volume and ensure that data is still present" { + run kubectl delete -f files/pod.encrypted-remount.vol.writing.yaml --wait --grace-period=0 --timeout=30s + [ "$status" -eq 0 ] + + run kubectl apply -f files/pod.encrypted-remount.vol.reading.yaml --wait --timeout=30s + [ "$status" -eq 0 ] + + run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.encrypted-remount.vol.reading.yaml --timeout=60s + [ "$status" -eq 0 ] + + sleep 1 + + run kubectl logs volume-encrypted-reading-test | grep "Happily writing encrypted" + [ "$status" -eq 0 ] + + run kubectl delete -f files/pod.encrypted-remount.vol.reading.yaml --wait --grace-period=0 --timeout=30s + [ "$status" -eq 0 ] + + run kubectl delete -f files/pvc.encrypted-linear.yaml --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "delete storageclass linear-encrypted" { + run kubectl delete -f files/storageclass.linear-encrypted.yaml --wait --timeout=30s + [ "$status" -eq 0 ] +} + +@test "delete encryption secret" { + run kubectl delete -f files/secret.encryption.yaml --wait --timeout=10s + [ "$status" -eq 0 ] +} + @test "deploy csi-driver-lvm eviction-controller" { run kubectl cordon csi-driver-lvm-worker2 [ "$status" -eq 0 ] diff --git a/tests/files/pod.encrypted-block.vol.yaml b/tests/files/pod.encrypted-block.vol.yaml new file mode 100644 index 00000000..f03c206b --- /dev/null +++ b/tests/files/pod.encrypted-block.vol.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + name: volume-test-encrypted-block +spec: + containers: + - name: volume-test-encrypted-block + image: alpine + imagePullPolicy: IfNotPresent + command: + - tail + - -f + - /etc/hosts + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10014 + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + volumeDevices: + - name: encrypted-block + devicePath: /dev/xvda + resources: + limits: + cpu: 100m + memory: 100M + volumes: + - name: encrypted-block + 
persistentVolumeClaim: + claimName: lvm-pvc-encrypted-block diff --git a/tests/files/pod.encrypted-linear.vol.yaml b/tests/files/pod.encrypted-linear.vol.yaml new file mode 100644 index 00000000..39f3f9ca --- /dev/null +++ b/tests/files/pod.encrypted-linear.vol.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + name: volume-test-encrypted +spec: + containers: + - name: volume-test-encrypted + image: alpine + imagePullPolicy: IfNotPresent + command: + - tail + - -f + - /etc/hosts + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10014 + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + volumeMounts: + - name: encrypted-linear + mountPath: /encrypted + resources: + limits: + cpu: 100m + memory: 100M + volumes: + - name: encrypted-linear + persistentVolumeClaim: + claimName: lvm-pvc-encrypted-linear diff --git a/tests/files/pod.encrypted-remount.vol.reading.yaml b/tests/files/pod.encrypted-remount.vol.reading.yaml new file mode 100644 index 00000000..47200143 --- /dev/null +++ b/tests/files/pod.encrypted-remount.vol.reading.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + name: volume-encrypted-reading-test +spec: + containers: + - name: volume-encrypted-reading-test + image: alpine + imagePullPolicy: IfNotPresent + command: + - tail + - -f + - /remount/output.log + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10014 + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + volumeMounts: + - name: encrypted-remount + mountPath: /remount + resources: + limits: + cpu: 100m + memory: 100M + volumes: + - name: encrypted-remount + persistentVolumeClaim: + claimName: lvm-pvc-encrypted-linear diff --git a/tests/files/pod.encrypted-remount.vol.writing.yaml b/tests/files/pod.encrypted-remount.vol.writing.yaml new file mode 100644 index 00000000..5d86b0ca --- /dev/null +++ b/tests/files/pod.encrypted-remount.vol.writing.yaml @@ -0,0 +1,33 
@@ +apiVersion: v1 +kind: Pod +metadata: + name: volume-encrypted-writing-test +spec: + containers: + - name: volume-encrypted-writing-test + image: alpine + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - while true; do echo "Happily writing encrypted at $(date)" >> /remount/output.log; sync; sleep 1; done + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10014 + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + volumeMounts: + - name: encrypted-remount + mountPath: /remount + resources: + limits: + cpu: 100m + memory: 100M + volumes: + - name: encrypted-remount + persistentVolumeClaim: + claimName: lvm-pvc-encrypted-linear diff --git a/tests/files/pvc.encrypted-block.resize.yaml b/tests/files/pvc.encrypted-block.resize.yaml new file mode 100644 index 00000000..1ad8e7ff --- /dev/null +++ b/tests/files/pvc.encrypted-block.resize.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lvm-pvc-encrypted-block +spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + storageClassName: csi-driver-lvm-linear-encrypted + resources: + requests: + storage: 200Mi diff --git a/tests/files/pvc.encrypted-block.yaml b/tests/files/pvc.encrypted-block.yaml new file mode 100644 index 00000000..050291dd --- /dev/null +++ b/tests/files/pvc.encrypted-block.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lvm-pvc-encrypted-block +spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + storageClassName: csi-driver-lvm-linear-encrypted + resources: + requests: + storage: 100Mi diff --git a/tests/files/pvc.encrypted-linear.resize.yaml b/tests/files/pvc.encrypted-linear.resize.yaml new file mode 100644 index 00000000..b69370b0 --- /dev/null +++ b/tests/files/pvc.encrypted-linear.resize.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lvm-pvc-encrypted-linear +spec: + accessModes: + - ReadWriteOnce + 
storageClassName: csi-driver-lvm-linear-encrypted + resources: + requests: + storage: 200Mi diff --git a/tests/files/pvc.encrypted-linear.yaml b/tests/files/pvc.encrypted-linear.yaml new file mode 100644 index 00000000..e8fbaf66 --- /dev/null +++ b/tests/files/pvc.encrypted-linear.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lvm-pvc-encrypted-linear +spec: + accessModes: + - ReadWriteOnce + storageClassName: csi-driver-lvm-linear-encrypted + resources: + requests: + storage: 100Mi diff --git a/tests/files/secret.encryption.yaml b/tests/files/secret.encryption.yaml new file mode 100644 index 00000000..042010f1 --- /dev/null +++ b/tests/files/secret.encryption.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: csi-lvm-encryption-secret +type: Opaque +stringData: + passphrase: "integration-test-passphrase" diff --git a/tests/files/storageclass.linear-encrypted.yaml b/tests/files/storageclass.linear-encrypted.yaml new file mode 100644 index 00000000..37588483 --- /dev/null +++ b/tests/files/storageclass.linear-encrypted.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-driver-lvm-linear-encrypted +parameters: + type: linear + encryption: "true" + csi.storage.k8s.io/node-stage-secret-name: csi-lvm-encryption-secret + csi.storage.k8s.io/node-stage-secret-namespace: default +provisioner: lvm.csi.metal-stack.io +allowVolumeExpansion: true +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer