diff --git a/docs.md b/docs.md index 6cb433f7a..9186ac1a7 100644 --- a/docs.md +++ b/docs.md @@ -664,6 +664,17 @@ The only exception to this check is if the existing cluster already has a `NO_PR Prevent the update of objects if the secret specified in `.spec.rkeConfig.etcd.s3.cloudCredentialName` does not exist. +##### ETCD Snapshot Restore + +Validation for `spec.rkeConfig.etcdSnapshotRestore` is only triggered when this field is changed to a new, non-empty value. This check is intentionally skipped if the field is unchanged, which prevents blocking unrelated cluster updates (e.g., node scaling) if the referenced snapshot is deleted *after* a successful restore. + +When triggered, the following checks are performed: + +* The referenced snapshot in `etcdSnapshotRestore.name` must exist in the same namespace as the cluster. +* The `etcdSnapshotRestore.restoreRKEConfig` field must be a supported mode (`"none"`, `"kubernetesVersion"`, or `"all"`). +* If `restoreRKEConfig` is **`"kubernetesVersion"`**, the snapshot's metadata must be parsable and contain a `kubernetesVersion`. +* If `restoreRKEConfig` is **`"all"`**, the snapshot's metadata must be parsable and contain both `kubernetesVersion` and `rkeConfig`. + ### Mutation Checks #### On Create diff --git a/pkg/clients/clients.go b/pkg/clients/clients.go index b077f3e9c..62b02b238 100644 --- a/pkg/clients/clients.go +++ b/pkg/clients/clients.go @@ -8,6 +8,8 @@ import ( managementv3 "github.com/rancher/webhook/pkg/generated/controllers/management.cattle.io/v3" "github.com/rancher/webhook/pkg/generated/controllers/provisioning.cattle.io" provv1 "github.com/rancher/webhook/pkg/generated/controllers/provisioning.cattle.io/v1" + "github.com/rancher/webhook/pkg/generated/controllers/rke.cattle.io" + rkev1 "github.com/rancher/webhook/pkg/generated/controllers/rke.cattle.io/v1" "github.com/rancher/wrangler/v3/pkg/clients" "github.com/rancher/wrangler/v3/pkg/schemes" v1 "k8s.io/api/admissionregistration/v1" @@ -21,6 +23,7 @@ type Clients struct { MultiClusterManagement bool Management managementv3.Interface Provisioning provv1.Interface + RKE rkev1.Interface RoleTemplateResolver *auth.RoleTemplateResolver GlobalRoleResolver *auth.GlobalRoleResolver DefaultResolver validation.AuthorizationRuleResolver @@ -46,6 +49,11 @@ func New(ctx context.Context, rest *rest.Config, mcmEnabled bool) (*Clients, err return nil, err } + rke, err := rke.NewFactoryFromConfigWithOptions(rest, clients.FactoryOptions) + if err != nil { + return nil, err + } + if err = mgmt.Start(ctx, 5); err != nil { return nil, err } @@ -61,6 +69,7 @@ func New(ctx context.Context, rest *rest.Config, mcmEnabled bool) (*Clients, err Clients: *clients, Management: mgmt.Management().V3(), Provisioning: prov.Provisioning().V1(), + RKE: rke.Rke().V1(), MultiClusterManagement: mcmEnabled, DefaultResolver: validation.NewDefaultRuleResolver(rbacRestGetter, rbacRestGetter, rbacRestGetter, rbacRestGetter), } diff --git a/pkg/codegen/main.go b/pkg/codegen/main.go index 1bf941ec1..72dc93024 100644 --- a/pkg/codegen/main.go +++ b/pkg/codegen/main.go @@ -13,6 +13,7 @@ import ( catalogv1 "github.com/rancher/rancher/pkg/apis/catalog.cattle.io/v1" v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" v1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" controllergen "github.com/rancher/wrangler/v3/pkg/controller-gen" "github.com/rancher/wrangler/v3/pkg/controller-gen/args" "golang.org/x/tools/imports" @@ -63,6 +64,11 @@ 
func main() { &catalogv1.ClusterRepo{}, }, }, + "rke.cattle.io": { + Types: []interface{}{ + &rkev1.ETCDSnapshot{}, + }, + }, }, }) @@ -116,6 +122,11 @@ func main() { &auditlogv1.AuditPolicy{}, }, }, + "rke.cattle.io": { + Types: []interface{}{ + &rkev1.ETCDSnapshot{}, + }, + }, }); err != nil { fmt.Printf("ERROR: %v\n", err) } diff --git a/pkg/generated/controllers/rke.cattle.io/factory.go b/pkg/generated/controllers/rke.cattle.io/factory.go new file mode 100644 index 000000000..81b1bd6dc --- /dev/null +++ b/pkg/generated/controllers/rke.cattle.io/factory.go @@ -0,0 +1,72 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by codegen. DO NOT EDIT. + +package rke + +import ( + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/client-go/rest" +) + +type Factory struct { + *generic.Factory +} + +func NewFactoryFromConfigOrDie(config *rest.Config) *Factory { + f, err := NewFactoryFromConfig(config) + if err != nil { + panic(err) + } + return f +} + +func NewFactoryFromConfig(config *rest.Config) (*Factory, error) { + return NewFactoryFromConfigWithOptions(config, nil) +} + +func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) { + return NewFactoryFromConfigWithOptions(config, &FactoryOptions{ + Namespace: namespace, + }) +} + +type FactoryOptions = generic.FactoryOptions + +func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) { + f, err := generic.NewFactoryFromConfigWithOptions(config, opts) + return &Factory{ + Factory: f, + }, err +} + +func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory { + f, err := NewFactoryFromConfigWithOptions(config, opts) + if err != nil { + panic(err) + } + return f +} + +func (c *Factory) Rke() Interface { + return New(c.ControllerFactory()) +} + +func (c *Factory) WithAgent(userAgent string) Interface { + return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory())) +} diff --git a/pkg/generated/controllers/rke.cattle.io/interface.go b/pkg/generated/controllers/rke.cattle.io/interface.go new file mode 100644 index 000000000..af7e13f35 --- /dev/null +++ b/pkg/generated/controllers/rke.cattle.io/interface.go @@ -0,0 +1,43 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by codegen. DO NOT EDIT. 
+ +package rke + +import ( + "github.com/rancher/lasso/pkg/controller" + v1 "github.com/rancher/webhook/pkg/generated/controllers/rke.cattle.io/v1" +) + +type Interface interface { + V1() v1.Interface +} + +type group struct { + controllerFactory controller.SharedControllerFactory +} + +// New returns a new Interface. +func New(controllerFactory controller.SharedControllerFactory) Interface { + return &group{ + controllerFactory: controllerFactory, + } +} + +func (g *group) V1() v1.Interface { + return v1.New(g.controllerFactory) +} diff --git a/pkg/generated/controllers/rke.cattle.io/v1/etcdsnapshot.go b/pkg/generated/controllers/rke.cattle.io/v1/etcdsnapshot.go new file mode 100644 index 000000000..142e88da4 --- /dev/null +++ b/pkg/generated/controllers/rke.cattle.io/v1/etcdsnapshot.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by codegen. DO NOT EDIT. + +package v1 + +import ( + "context" + "sync" + "time" + + v1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ETCDSnapshotController interface for managing ETCDSnapshot resources. +type ETCDSnapshotController interface { + generic.ControllerInterface[*v1.ETCDSnapshot, *v1.ETCDSnapshotList] +} + +// ETCDSnapshotClient interface for managing ETCDSnapshot resources in Kubernetes. +type ETCDSnapshotClient interface { + generic.ClientInterface[*v1.ETCDSnapshot, *v1.ETCDSnapshotList] +} + +// ETCDSnapshotCache interface for retrieving ETCDSnapshot resources in memory. +type ETCDSnapshotCache interface { + generic.CacheInterface[*v1.ETCDSnapshot] +} + +// ETCDSnapshotStatusHandler is executed for every added or modified ETCDSnapshot. Should return the new status to be updated +type ETCDSnapshotStatusHandler func(obj *v1.ETCDSnapshot, status v1.ETCDSnapshotStatus) (v1.ETCDSnapshotStatus, error) + +// ETCDSnapshotGeneratingHandler is the top-level handler that is executed for every ETCDSnapshot event. It extends ETCDSnapshotStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type ETCDSnapshotGeneratingHandler func(obj *v1.ETCDSnapshot, status v1.ETCDSnapshotStatus) ([]runtime.Object, v1.ETCDSnapshotStatus, error) + +// RegisterETCDSnapshotStatusHandler configures a ETCDSnapshotController to execute a ETCDSnapshotStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterETCDSnapshotStatusHandler(ctx context.Context, controller ETCDSnapshotController, condition condition.Cond, name string, handler ETCDSnapshotStatusHandler) { + statusHandler := &eTCDSnapshotStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterETCDSnapshotGeneratingHandler configures a ETCDSnapshotController to execute a ETCDSnapshotGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterETCDSnapshotGeneratingHandler(ctx context.Context, controller ETCDSnapshotController, apply apply.Apply, + condition condition.Cond, name string, handler ETCDSnapshotGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &eTCDSnapshotGeneratingHandler{ + ETCDSnapshotGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterETCDSnapshotStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type eTCDSnapshotStatusHandler struct { + client ETCDSnapshotClient + condition condition.Cond + handler ETCDSnapshotStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *eTCDSnapshotStatusHandler) sync(key string, obj *v1.ETCDSnapshot) (*v1.ETCDSnapshot, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type eTCDSnapshotGeneratingHandler struct { + ETCDSnapshotGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *eTCDSnapshotGeneratingHandler) Remove(key string, obj *v1.ETCDSnapshot) (*v1.ETCDSnapshot, error) { + if obj != nil { + return obj, nil + } + + obj = &v1.ETCDSnapshot{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). 
+ ApplyObjects() +} + +// Handle executes the configured ETCDSnapshotGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *eTCDSnapshotGeneratingHandler) Handle(obj *v1.ETCDSnapshot, status v1.ETCDSnapshotStatus) (v1.ETCDSnapshotStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.ETCDSnapshotGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *eTCDSnapshotGeneratingHandler) isNewResourceVersion(obj *v1.ETCDSnapshot) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *eTCDSnapshotGeneratingHandler) storeResourceVersion(obj *v1.ETCDSnapshot) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/pkg/generated/controllers/rke.cattle.io/v1/interface.go b/pkg/generated/controllers/rke.cattle.io/v1/interface.go new file mode 100644 index 000000000..33848bca8 --- /dev/null +++ b/pkg/generated/controllers/rke.cattle.io/v1/interface.go @@ -0,0 +1,49 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by codegen. DO NOT EDIT. 
+ +package v1 + +import ( + "github.com/rancher/lasso/pkg/controller" + v1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/schemes" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func init() { + schemes.Register(v1.AddToScheme) +} + +type Interface interface { + ETCDSnapshot() ETCDSnapshotController +} + +func New(controllerFactory controller.SharedControllerFactory) Interface { + return &version{ + controllerFactory: controllerFactory, + } +} + +type version struct { + controllerFactory controller.SharedControllerFactory +} + +func (v *version) ETCDSnapshot() ETCDSnapshotController { + return generic.NewController[*v1.ETCDSnapshot, *v1.ETCDSnapshotList](schema.GroupVersionKind{Group: "rke.cattle.io", Version: "v1", Kind: "ETCDSnapshot"}, "etcdsnapshots", true, v.controllerFactory) +} diff --git a/pkg/generated/objects/rke.cattle.io/v1/objects.go b/pkg/generated/objects/rke.cattle.io/v1/objects.go new file mode 100644 index 000000000..986613fdc --- /dev/null +++ b/pkg/generated/objects/rke.cattle.io/v1/objects.go @@ -0,0 +1,62 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" + admissionv1 "k8s.io/api/admission/v1" +) + +// ETCDSnapshotOldAndNewFromRequest gets the old and new ETCDSnapshot objects, respectively, from the webhook request. +// If the request is a Delete operation, then the new object is the zero value for ETCDSnapshot. +// Similarly, if the request is a Create operation, then the old object is the zero value for ETCDSnapshot. +func ETCDSnapshotOldAndNewFromRequest(request *admissionv1.AdmissionRequest) (*v1.ETCDSnapshot, *v1.ETCDSnapshot, error) { + if request == nil { + return nil, nil, fmt.Errorf("nil request") + } + + object := &v1.ETCDSnapshot{} + oldObject := &v1.ETCDSnapshot{} + + if request.Operation != admissionv1.Delete { + err := json.Unmarshal(request.Object.Raw, object) + if err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal request object: %w", err) + } + } + + if request.Operation == admissionv1.Create { + return oldObject, object, nil + } + + err := json.Unmarshal(request.OldObject.Raw, oldObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal request oldObject: %w", err) + } + + return oldObject, object, nil +} + +// ETCDSnapshotFromRequest returns a ETCDSnapshot object from the webhook request. +// If the operation is a Delete operation, then the old object is returned. +// Otherwise, the new object is returned. 
+func ETCDSnapshotFromRequest(request *admissionv1.AdmissionRequest) (*v1.ETCDSnapshot, error) { + if request == nil { + return nil, fmt.Errorf("nil request") + } + + object := &v1.ETCDSnapshot{} + raw := request.Object.Raw + + if request.Operation == admissionv1.Delete { + raw = request.OldObject.Raw + } + + err := json.Unmarshal(raw, object) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal request object: %w", err) + } + + return object, nil +} diff --git a/pkg/resources/provisioning.cattle.io/v1/cluster/Cluster.md b/pkg/resources/provisioning.cattle.io/v1/cluster/Cluster.md index 2a65e8b25..74501c9cd 100644 --- a/pkg/resources/provisioning.cattle.io/v1/cluster/Cluster.md +++ b/pkg/resources/provisioning.cattle.io/v1/cluster/Cluster.md @@ -97,6 +97,17 @@ The only exception to this check is if the existing cluster already has a `NO_PR Prevent the update of objects if the secret specified in `.spec.rkeConfig.etcd.s3.cloudCredentialName` does not exist. +#### ETCD Snapshot Restore + +Validation for `spec.rkeConfig.etcdSnapshotRestore` is only triggered when this field is changed to a new, non-empty value. This check is intentionally skipped if the field is unchanged, which prevents blocking unrelated cluster updates (e.g., node scaling) if the referenced snapshot is deleted *after* a successful restore. + +When triggered, the following checks are performed: + +* The referenced snapshot in `etcdSnapshotRestore.name` must exist in the same namespace as the cluster. +* The `etcdSnapshotRestore.restoreRKEConfig` field must be a supported mode (`"none"`, `"kubernetesVersion"`, or `"all"`). +* If `restoreRKEConfig` is **`"kubernetesVersion"`**, the snapshot's metadata must be parsable and contain a `kubernetesVersion`. +* If `restoreRKEConfig` is **`"all"`**, the snapshot's metadata must be parsable and contain both `kubernetesVersion` and `rkeConfig`. 
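+
+For example, an update like the following (names and namespace are illustrative) triggers all of the checks above:
+
+```yaml
+apiVersion: provisioning.cattle.io/v1
+kind: Cluster
+metadata:
+  name: my-cluster
+  namespace: fleet-default
+spec:
+  rkeConfig:
+    etcdSnapshotRestore:
+      # Must reference an ETCDSnapshot in the same namespace as the cluster.
+      name: my-cluster-etcd-snapshot-1
+      # One of "none", "kubernetesVersion", or "all".
+      restoreRKEConfig: all
+```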
+ ## Mutation Checks ### On Create diff --git a/pkg/resources/provisioning.cattle.io/v1/cluster/validator.go b/pkg/resources/provisioning.cattle.io/v1/cluster/validator.go index 22e5d4bab..2620f94cb 100644 --- a/pkg/resources/provisioning.cattle.io/v1/cluster/validator.go +++ b/pkg/resources/provisioning.cattle.io/v1/cluster/validator.go @@ -1,9 +1,13 @@ package cluster import ( + "bytes" + "compress/gzip" "crypto/sha256" "encoding/base64" + "encoding/json" "fmt" + "io" "net/http" "path/filepath" "reflect" @@ -17,6 +21,7 @@ import ( "github.com/rancher/webhook/pkg/admission" "github.com/rancher/webhook/pkg/clients" v3 "github.com/rancher/webhook/pkg/generated/controllers/management.cattle.io/v3" + rkecontrollers "github.com/rancher/webhook/pkg/generated/controllers/rke.cattle.io/v1" psa "github.com/rancher/webhook/pkg/podsecurityadmission" "github.com/rancher/webhook/pkg/resources/common" corev1controller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" @@ -25,6 +30,7 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" authv1 "k8s.io/api/authorization/v1" k8sv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" @@ -56,6 +62,7 @@ func NewProvisioningClusterValidator(client *clients.Clients) *ProvisioningClust secretCache: client.Core.Secret().Cache(), psactCache: client.Management.PodSecurityAdmissionConfigurationTemplate().Cache(), featureCache: client.Management.Feature().Cache(), + etcdSnapshotCache: client.RKE.ETCDSnapshot().Cache(), }, } } @@ -91,6 +98,7 @@ type provisioningAdmitter struct { secretCache corev1controller.SecretCache psactCache v3.PodSecurityAdmissionConfigurationTemplateCache featureCache v3.FeatureCache + etcdSnapshotCache rkecontrollers.ETCDSnapshotCache } // Admit handles the webhook admission request sent to this webhook. @@ -163,6 +171,10 @@ func (p *provisioningAdmitter) Admit(request *admission.Request) (*admissionv1.A if response, err = p.validateS3Secret(oldCluster, cluster); err != nil || !response.Allowed { return response, err } + + if response, err = p.validateETCDSnapshotRestore(request, oldCluster, cluster); err != nil || !response.Allowed { + return response, err + } } if err := p.validatePSACT(request, response, cluster); err != nil || response.Result != nil { @@ -771,6 +783,129 @@ func (p *provisioningAdmitter) validateS3Secret(oldCluster, cluster *v1.Cluster) return admission.ResponseAllowed(), nil } +// validateETCDSnapshotRestore ensures that any requested ETCD restore +// (a) references an existing ETCDSnapshot, and +// (b) contains decodable metadata with a valid "provisioning-cluster-spec". +func (p *provisioningAdmitter) validateETCDSnapshotRestore(request *admission.Request, oldCluster, newCluster *v1.Cluster) (*admissionv1.AdmissionResponse, error) { + if request.Operation != admissionv1.Update { + return admission.ResponseAllowed(), nil + } + + // No RKEConfig means no restore spec. + if newCluster.Spec.RKEConfig == nil { + return admission.ResponseAllowed(), nil + } + + newRestore := newCluster.Spec.RKEConfig.ETCDSnapshotRestore + + // Allow if restore spec is being cleared or is empty. 
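+	// An empty name or mode means no restore is being requested, so there is
+	// nothing to validate here; rejecting would also block updates that merely
+	// clear a previously completed restore.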
+ if newRestore == nil || newRestore.Name == "" || newRestore.RestoreRKEConfig == "" { + return admission.ResponseAllowed(), nil + } + + var oldRestore *rkev1.ETCDSnapshotRestore + if oldCluster.Spec.RKEConfig != nil { + oldRestore = oldCluster.Spec.RKEConfig.ETCDSnapshotRestore + } + + // Allow if spec is unchanged, to avoid blocking unrelated cluster updates. + if equality.Semantic.DeepEqual(oldRestore, newRestore) { + return admission.ResponseAllowed(), nil + } + + snap, err := p.etcdSnapshotCache.Get(newCluster.Namespace, newRestore.Name) + if err != nil { + if apierrors.IsNotFound(err) { + return admission.ResponseBadRequest( + fmt.Sprintf("etcd restore references missing snapshot %s in namespace %s", newRestore.Name, newCluster.Namespace)), nil + } + return nil, fmt.Errorf("failed to get etcd snapshot %s/%s: %w", newCluster.Namespace, newRestore.Name, err) + } + + var clusterSpec *v1.ClusterSpec + var decodeErr error + // Only parse snapshot metadata if the restore mode requires it. + if newRestore.RestoreRKEConfig != "none" { + clusterSpec, decodeErr = parseSnapshotClusterSpec(snap) + if decodeErr != nil { + return admission.ResponseBadRequest( + fmt.Sprintf("invalid ETCD snapshot metadata for %s/%s: %v", snap.Namespace, snap.Name, decodeErr)), nil + } + } + + switch newRestore.RestoreRKEConfig { + case "none": + return admission.ResponseAllowed(), nil + + case "kubernetesVersion": + if clusterSpec.KubernetesVersion == "" { + return admission.ResponseBadRequest("snapshot metadata missing KubernetesVersion for kubernetesVersion restore"), nil + } + return admission.ResponseAllowed(), nil + + case "all": + if clusterSpec.RKEConfig == nil || clusterSpec.KubernetesVersion == "" { + return admission.ResponseBadRequest("snapshot metadata must include RKEConfig and KubernetesVersion for 'all' restore"), nil + } + return admission.ResponseAllowed(), nil + + default: + return admission.ResponseBadRequest(fmt.Sprintf("unsupported restore mode %s", newRestore.RestoreRKEConfig)), nil + } +} + +// parseSnapshotClusterSpec decodes snapshot.SnapshotFile.Metadata into a v1.ClusterSpec. +// The metadata is stored as a nested, gzipped, base64-encoded structure. +func parseSnapshotClusterSpec(snap *rkev1.ETCDSnapshot) (*v1.ClusterSpec, error) { + if snap == nil { + return nil, fmt.Errorf("nil snapshot") + } + if snap.SnapshotFile.Metadata == "" { + return nil, fmt.Errorf("no metadata present") + } + + // The top-level metadata string is a base64-encoded JSON map. + outerBytes, err := base64.StdEncoding.DecodeString(snap.SnapshotFile.Metadata) + if err != nil { + return nil, fmt.Errorf("metadata base64 decode failed: %w", err) + } + var outer map[string]string + if err := json.Unmarshal(outerBytes, &outer); err != nil { + return nil, fmt.Errorf("metadata JSON decode failed: %w", err) + } + + // The actual spec is in a specific key in that map. + innerB64, ok := outer["provisioning-cluster-spec"] + if !ok || innerB64 == "" { + return nil, fmt.Errorf(`metadata missing "provisioning-cluster-spec"`) + } + + // This inner value is also base64-encoded, containing gzipped data. + innerGz, err := base64.StdEncoding.DecodeString(innerB64) + if err != nil { + return nil, fmt.Errorf("inner base64 decode failed: %w", err) + } + + // Decompress the gzipped data. 
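+	// The decompressed bytes are expected to hold the JSON-encoded provisioning
+	// cluster spec that was captured when the snapshot was taken.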
+ zr, err := gzip.NewReader(bytes.NewReader(innerGz)) + if err != nil { + return nil, fmt.Errorf("gzip open failed: %w", err) + } + defer zr.Close() + + raw, err := io.ReadAll(zr) + if err != nil { + return nil, fmt.Errorf("gzip read failed: %w", err) + } + + // The decompressed data is the final cluster spec JSON. + var spec v1.ClusterSpec + if err := json.Unmarshal(raw, &spec); err != nil { + return nil, fmt.Errorf("cluster spec JSON decode failed: %w", err) + } + return &spec, nil +} + func validateAgentDeploymentCustomization(customization *v1.AgentDeploymentCustomization, path *field.Path) field.ErrorList { if customization == nil { return nil diff --git a/pkg/resources/provisioning.cattle.io/v1/cluster/validator_test.go b/pkg/resources/provisioning.cattle.io/v1/cluster/validator_test.go index 8102f620d..a8fb766da 100644 --- a/pkg/resources/provisioning.cattle.io/v1/cluster/validator_test.go +++ b/pkg/resources/provisioning.cattle.io/v1/cluster/validator_test.go @@ -1,6 +1,10 @@ package cluster import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" "fmt" "strings" "testing" @@ -12,11 +16,13 @@ import ( "github.com/rancher/webhook/pkg/resources/common" "github.com/rancher/wrangler/v3/pkg/generic/fake" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" admissionv1 "k8s.io/api/admission/v1" k8sv1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -2885,3 +2891,443 @@ func Test_ValidateRKEConfigChanged(t *testing.T) { }) } } + +// generateTestMetadata is a helper to create the nested metadata string +// as it's stored on the ETCDSnapshot object. 
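+// It mirrors parseSnapshotClusterSpec in reverse: the spec is JSON-encoded,
+// gzipped, base64-encoded, wrapped in a JSON map under the
+// "provisioning-cluster-spec" key, and base64-encoded once more.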
+func generateTestMetadata(clusterSpec *v1.ClusterSpec) (string, error) { + specBytes, err := json.Marshal(clusterSpec) + if err != nil { + return "", err + } + + var gzipBuffer bytes.Buffer + gzipWriter := gzip.NewWriter(&gzipBuffer) + if _, err := gzipWriter.Write(specBytes); err != nil { + return "", err + } + if err := gzipWriter.Close(); err != nil { + return "", err + } + + innerBase64 := base64.StdEncoding.EncodeToString(gzipBuffer.Bytes()) + + outerMap := map[string]string{ + "provisioning-cluster-spec": innerBase64, + } + + outerBytes, err := json.Marshal(outerMap) + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(outerBytes), nil +} + +func TestParseSnapshotClusterSpec(t *testing.T) { + asserts := assert.New(t) + requires := require.New(t) + + validClusterSpec := &v1.ClusterSpec{ + KubernetesVersion: "v1.25.0", + RKEConfig: &v1.RKEConfig{}, + } + validMetadata, err := generateTestMetadata(validClusterSpec) + requires.NoError(err, "failed to generate valid test metadata") + + var invalidJSONBytes bytes.Buffer + gzipWriter := gzip.NewWriter(&invalidJSONBytes) + _, err = gzipWriter.Write([]byte("this is not valid json")) + requires.NoError(err) + requires.NoError(gzipWriter.Close()) + + invalidInnerJSONOuterMap := map[string]string{ + "provisioning-cluster-spec": base64.StdEncoding.EncodeToString(invalidJSONBytes.Bytes()), + } + invalidJSONOuterBytes, err := json.Marshal(invalidInnerJSONOuterMap) + requires.NoError(err) + invalidInnerJSONMetadata := base64.StdEncoding.EncodeToString(invalidJSONOuterBytes) + + testCases := []struct { + name string + snapshot *rkev1.ETCDSnapshot + expectedSpec *v1.ClusterSpec + shouldError bool + expectedError string + }{ + { + name: "should error on nil snapshot", + snapshot: nil, + shouldError: true, + expectedError: "nil snapshot", + }, + { + name: "should error on empty metadata", + snapshot: &rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: ""}, + }, + shouldError: true, + expectedError: "no metadata present", + }, + { + name: "should error on invalid outer base64", + snapshot: &rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: "not-base64-at-all"}, + }, + shouldError: true, + expectedError: "metadata base64 decode failed", + }, + { + name: "should error on invalid outer JSON", + snapshot: &rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: base64.StdEncoding.EncodeToString([]byte("not json"))}, + }, + shouldError: true, + expectedError: "metadata JSON decode failed", + }, + { + name: "should error on missing spec key", + snapshot: &rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: base64.StdEncoding.EncodeToString([]byte(`{"wrong-key": "value"}`))}, + }, + shouldError: true, + expectedError: `metadata missing "provisioning-cluster-spec"`, + }, + { + name: "should error on invalid inner base64", + snapshot: &rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: base64.StdEncoding.EncodeToString([]byte(`{"provisioning-cluster-spec": "not-base64"}`))}, + }, + shouldError: true, + expectedError: "inner base64 decode failed", + }, + { + name: "should error on invalid gzip data", + snapshot: &rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: base64.StdEncoding.EncodeToString([]byte(`{"provisioning-cluster-spec": "` + base64.StdEncoding.EncodeToString([]byte("not gzip")) + `"}`))}, + }, + shouldError: true, + expectedError: "gzip open failed", + }, + { + name: "should error on invalid inner JSON", + snapshot: 
&rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: invalidInnerJSONMetadata}, + }, + shouldError: true, + expectedError: "cluster spec JSON decode failed", + }, + { + name: "should succeed with valid metadata", + snapshot: &rkev1.ETCDSnapshot{ + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: validMetadata}, + }, + expectedSpec: validClusterSpec, + shouldError: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(_ *testing.T) { + resultSpec, err := parseSnapshotClusterSpec(testCase.snapshot) + + if testCase.shouldError { + asserts.Error(err) + if testCase.expectedError != "" { + asserts.Contains(err.Error(), testCase.expectedError) + } + } else { + asserts.NoError(err) + asserts.Equal(testCase.expectedSpec, resultSpec) + } + }) + } +} + +func TestValidateETCDSnapshotRestore(t *testing.T) { + asserts := assert.New(t) + requires := require.New(t) + + const testNamespace = "test-ns" + const validAllSnapshotName = "valid-all-snapshot" + const validK8sSnapshotName = "valid-k8s-snapshot" + const invalidMetadataSnapshotName = "invalid-metadata-snapshot" + const missingK8sSnapshotName = "missing-k8s-snapshot" + const missingRKEConfigSnapshotName = "missing-rkeconfig-snapshot" + const nonExistentSnapshotName = "non-existent-snapshot" + const internalErrorSnapshotName = "internal-error-snapshot" + + // Create reusable specs + validAllSpec := &v1.ClusterSpec{ + KubernetesVersion: "v1.25.0", + RKEConfig: &v1.RKEConfig{}, + } + validK8sSpec := &v1.ClusterSpec{ + KubernetesVersion: "v1.25.0", + } + missingK8sSpec := &v1.ClusterSpec{ + RKEConfig: &v1.RKEConfig{}, + } + missingRKEConfigSpec := &v1.ClusterSpec{ + KubernetesVersion: "v1.25.0", + } + + // Create reusable metadata strings + validAllMetadata, err := generateTestMetadata(validAllSpec) + requires.NoError(err) + validK8sMetadata, err := generateTestMetadata(validK8sSpec) + requires.NoError(err) + missingK8sMetadata, err := generateTestMetadata(missingK8sSpec) + requires.NoError(err) + missingRKEConfigMetadata, err := generateTestMetadata(missingRKEConfigSpec) + requires.NoError(err) + + // Create reusable snapshot objects (fixtures for mock returns) + validAllSnapshot := &rkev1.ETCDSnapshot{ + ObjectMeta: metav1.ObjectMeta{Name: validAllSnapshotName, Namespace: testNamespace}, + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: validAllMetadata}, + } + validK8sSnapshot := &rkev1.ETCDSnapshot{ + ObjectMeta: metav1.ObjectMeta{Name: validK8sSnapshotName, Namespace: testNamespace}, + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: validK8sMetadata}, + } + invalidMetadataSnapshot := &rkev1.ETCDSnapshot{ + ObjectMeta: metav1.ObjectMeta{Name: invalidMetadataSnapshotName, Namespace: testNamespace}, + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: "not-valid-at-all"}, + } + missingK8sSnapshot := &rkev1.ETCDSnapshot{ + ObjectMeta: metav1.ObjectMeta{Name: missingK8sSnapshotName, Namespace: testNamespace}, + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: missingK8sMetadata}, + } + missingRKEConfigSnapshot := &rkev1.ETCDSnapshot{ + ObjectMeta: metav1.ObjectMeta{Name: missingRKEConfigSnapshotName, Namespace: testNamespace}, + SnapshotFile: rkev1.ETCDSnapshotFile{Metadata: missingRKEConfigMetadata}, + } + + baseRequest := func() *admission.Request { + return &admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{}, + OldObject: runtime.RawExtension{}, + }, + } + } + baseCluster := func() *v1.Cluster { + return &v1.Cluster{ + ObjectMeta: 
metav1.ObjectMeta{Namespace: testNamespace}, + Spec: v1.ClusterSpec{}, + } + } + withRestore := func(cluster *v1.Cluster, mode string, snapshotName string) *v1.Cluster { + cluster.Spec.RKEConfig = &v1.RKEConfig{ + ETCDSnapshotRestore: &rkev1.ETCDSnapshotRestore{ + Name: snapshotName, + RestoreRKEConfig: mode, + }, + } + return cluster + } + + testCases := []struct { + name string + request *admission.Request + oldCluster *v1.Cluster + newCluster *v1.Cluster + mockSetup func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) + expectAllowed bool + expectedError string // For internal, non-admission errors + expectedDenyMsg string // For admission.ResponseBadRequest + }{ + { + name: "should allow on create operation", + request: &admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + oldCluster: nil, + newCluster: baseCluster(), + expectAllowed: true, + }, + { + name: "should allow if new RKEConfig is nil", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: baseCluster(), // Spec.RKEConfig is nil + expectAllowed: true, + }, + { + name: "should allow if new restore spec is nil", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: func() *v1.Cluster { + cluster := baseCluster() + cluster.Spec.RKEConfig = &v1.RKEConfig{ETCDSnapshotRestore: nil} + return cluster + }(), + expectAllowed: true, + }, + { + name: "should allow if restore spec is unchanged", + request: baseRequest(), + oldCluster: withRestore(baseCluster(), "all", validAllSnapshotName), + newCluster: withRestore(baseCluster(), "all", validAllSnapshotName), + expectAllowed: true, + }, + { + name: "should allow if new restore name is empty", + request: baseRequest(), + oldCluster: withRestore(baseCluster(), "all", validAllSnapshotName), + newCluster: withRestore(baseCluster(), "all", ""), // <-- Name is empty + expectAllowed: true, + }, + { + name: "should allow if new restore mode is empty", + request: baseRequest(), + oldCluster: withRestore(baseCluster(), "all", validAllSnapshotName), + newCluster: withRestore(baseCluster(), "", validAllSnapshotName), // <-- Mode is empty + expectAllowed: true, + }, + { + name: "CRITICAL: should allow unchanged spec even if snapshot is missing", + request: baseRequest(), + oldCluster: withRestore(baseCluster(), "all", nonExistentSnapshotName), + newCluster: withRestore(baseCluster(), "all", nonExistentSnapshotName), + expectAllowed: true, + }, + { + name: "should deny if snapshot not found", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "all", nonExistentSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, nonExistentSnapshotName). + Return(nil, apierrors.NewNotFound(rkev1.Resource("etcdsnapshot"), nonExistentSnapshotName)) + }, + expectAllowed: false, + expectedDenyMsg: fmt.Sprintf("etcd restore references missing snapshot %s in namespace %s", nonExistentSnapshotName, testNamespace), + }, + { + name: "should return internal error if cache fails", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "all", internalErrorSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, internalErrorSnapshotName). 
+ Return(nil, fmt.Errorf("internal cache error")) + }, + expectAllowed: false, + expectedError: "internal cache error", + }, + { + name: "should deny if metadata is invalid", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "all", invalidMetadataSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, invalidMetadataSnapshotName). + Return(invalidMetadataSnapshot, nil) + }, + expectAllowed: false, + expectedDenyMsg: fmt.Sprintf("invalid ETCD snapshot metadata for %s/%s", testNamespace, invalidMetadataSnapshotName), + }, + { + name: "should allow restore mode 'none' even with invalid metadata", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "none", invalidMetadataSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, invalidMetadataSnapshotName). + Return(invalidMetadataSnapshot, nil) + }, + expectAllowed: true, + }, + { + name: "should allow restore mode 'kubernetesVersion' with valid spec", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "kubernetesVersion", validK8sSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, validK8sSnapshotName). + Return(validK8sSnapshot, nil) + }, + expectAllowed: true, + }, + { + name: "should deny restore mode 'kubernetesVersion' with missing k8s version", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "kubernetesVersion", missingK8sSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, missingK8sSnapshotName). + Return(missingK8sSnapshot, nil) + }, + expectAllowed: false, + expectedDenyMsg: "snapshot metadata missing KubernetesVersion for kubernetesVersion restore", + }, + { + name: "should allow restore mode 'all' with valid spec", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "all", validAllSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, validAllSnapshotName). + Return(validAllSnapshot, nil) + }, + expectAllowed: true, + }, + { + name: "should deny restore mode 'all' with missing RKEConfig", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "all", missingRKEConfigSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, missingRKEConfigSnapshotName). + Return(missingRKEConfigSnapshot, nil) + }, + expectAllowed: false, + expectedDenyMsg: "snapshot metadata must include RKEConfig and KubernetesVersion for 'all' restore", + }, + { + name: "should deny unsupported restore mode", + request: baseRequest(), + oldCluster: baseCluster(), + newCluster: withRestore(baseCluster(), "invalid-mode", validAllSnapshotName), + mockSetup: func(mockCache *fake.MockCacheInterface[*rkev1.ETCDSnapshot]) { + mockCache.EXPECT().Get(testNamespace, validAllSnapshotName). 
+ Return(validAllSnapshot, nil) + }, + expectAllowed: false, + expectedDenyMsg: "unsupported restore mode invalid-mode", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + gomockController := gomock.NewController(t) + mockSnapshotCache := fake.NewMockCacheInterface[*rkev1.ETCDSnapshot](gomockController) + admitter := &provisioningAdmitter{ + etcdSnapshotCache: mockSnapshotCache, + } + + if testCase.mockSetup != nil { + testCase.mockSetup(mockSnapshotCache) + } + + response, err := admitter.validateETCDSnapshotRestore(testCase.request, testCase.oldCluster, testCase.newCluster) + + if testCase.expectedError != "" { + requires.Error(err) + requires.Contains(err.Error(), testCase.expectedError) + return + } + + asserts.NoError(err, "unexpected internal error") + + asserts.Equal(testCase.expectAllowed, response.Allowed) + if !testCase.expectAllowed && testCase.expectedDenyMsg != "" { + asserts.Contains(response.Result.Message, testCase.expectedDenyMsg) + } + }) + } +} diff --git a/pkg/server/handlers.go b/pkg/server/handlers.go index 0f9e619b3..8d93805f5 100644 --- a/pkg/server/handlers.go +++ b/pkg/server/handlers.go @@ -55,7 +55,6 @@ func Validation(clients *clients.Clients) ([]admission.ValidatingAdmissionHandle handlers := []admission.ValidatingAdmissionHandler{ feature.NewValidator(), clusters, - provisioningCluster.NewProvisioningClusterValidator(clients), machineconfig.NewValidator(), nshandler.NewValidator(clients.K8s.AuthorizationV1().SubjectAccessReviews()), clusterrepo.NewValidator(), @@ -69,6 +68,7 @@ func Validation(clients *clients.Clients) ([]admission.ValidatingAdmissionHandle handlers = append( handlers, + provisioningCluster.NewProvisioningClusterValidator(clients), clusterproxyconfig.NewValidator(clients.Management.ClusterProxyConfig().Cache()), podsecurityadmissionconfigurationtemplate.NewValidator(clients.Management.Cluster().Cache(), clients.Provisioning.Cluster().Cache()), globalrole.NewValidator(clients.DefaultResolver, grbResolvers, clients.K8s.AuthorizationV1().SubjectAccessReviews(), clients.GlobalRoleResolver),