diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go index c7968614137b2..164005a0d87e3 100644 --- a/cmd/kops/create_cluster.go +++ b/cmd/kops/create_cluster.go @@ -223,6 +223,14 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { return nil, cobra.ShellCompDirectiveNoFileComp }) + if featureflag.DiscoveryService.Enabled() { + cmd.Flags().StringVar(&options.PublicDiscoveryServiceURL, "discovery-service", options.PublicDiscoveryServiceURL, "A URL to a server implementing public OIDC discovery. Enables IRSA in AWS.") + cmd.RegisterFlagCompletionFunc("discovery-service", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + // TODO complete vfs paths + return nil, cobra.ShellCompDirectiveNoFileComp + }) + } + var validClouds []string { allClouds := clouds.SupportedClouds() diff --git a/discovery/GEMINI.md b/discovery/GEMINI.md new file mode 100644 index 0000000000000..f7ed0d9ce523a --- /dev/null +++ b/discovery/GEMINI.md @@ -0,0 +1,63 @@ +# Discovery Service Project + +## Overview +This project implements a public discovery service designed for decentralized, secure peer discovery. The core innovation is the use of **Custom Certificate Authorities (CAs)** to define isolated "Universes". Clients register and discover peers within their own Universe, identified and secured purely by mTLS. + +The service emulates a Kubernetes API, allowing interaction via `kubectl`, including support for **Server-Side Apply**. + +## Key Concepts + +### 1. The "Universe" +- A **Universe** is an isolated scope for peer discovery. +- It is cryptographically defined by the **SHA256 hash of the Root CA's Public Key**. +- Any client possessing a valid certificate signed by a specific CA belongs to that CA's Universe. +- Different CAs = Different Universes. There is no crossover. + +### 2. Authentication & Authorization +- **Mechanism**: Mutual TLS (mTLS). +- **Client Identity**: Derived from the **Common Name (CN)** of the leaf certificate. +- **Universe Context**: Derived from the **Root CA** presented in the TLS handshake. +- **Requirement**: Clients **MUST** present the full certificate chain (Leaf + Root CA) during the handshake. The server does not maintain a pre-configured trust store for these custom CAs; it uses the presented chain to determine the scope. + +### 3. API Resources +- **DiscoveryEndpoint** (`discovery.kops.k8s.io/v1alpha1`): Represents a peer in the discovery network. Can optionally hold OIDC configuration (Issuer URL, JWKS). +- **Validation**: A client with CN `client1` can only Create/Update a `DiscoveryEndpoint` named `client1`. +- **Apply Support**: The server supports `PATCH` requests to facilitate `kubectl apply --server-side`. + +### 4. OIDC Discovery +The server acts as an OIDC Discovery Provider for the Universe. +- **Public Endpoints**: + - `/.well-known/openid-configuration`: Returns the OIDC discovery document. + - `/openid/v1/jwks`: Returns the JSON Web Key Set (JWKS). +- **Data Source**: These endpoints serve data uploaded by clients via the `DiscoveryEndpoint` resource. + +## Architecture + +### Project Structure +- `cmd/discovery-server/`: Main entry point. Wires up the HTTP server with TLS configuration. +- `pkg/discovery/`: + - `auth.go`: logic for inspecting TLS `PeerCertificates` to extract the Universe ID (CA hash) and Client ID. + - `store.go`: In-memory thread-safe storage (`MemoryStore`) mapping Universe IDs to lists of `DiscoveryEndpoint` objects. 
+ - `server.go`: HTTP handlers implementing the K8s API emulation for `/apis/discovery.kops.k8s.io/v1alpha1`. + - `k8s_types.go`: Definitions of `DiscoveryEndpoint`, `DiscoveryEndpointList`, `TypeMeta`, `ObjectMeta` etc. + +### Data Model +- **DiscoveryEndpoint**: The core resource. Contains `Spec.Addresses` and metadata. +- **Universe**: Contains a map of `DiscoveryEndpoint` objects (keyed by name). +- **Unified Types**: The API type `DiscoveryEndpoint` is used directly for in-memory storage, ensuring zero conversion overhead. + +## Security Model +- **Trust Delegation**: The server delegates trust to the CA. If you hold the CA key, you control the Universe. +- **Isolation**: The server ensures that a client presenting a cert chain for `CA_A` cannot read or write data to the Universe defined by `CA_B`. +- **Ephemeral**: The current implementation uses in-memory storage. Data is lost on restart. + +## Building and Running + +### Build +```bash +go build ./cmd/discovery-server +``` + +### Run + +See docs/walkthrough.md for instructions on testing functionality. diff --git a/discovery/README.md b/discovery/README.md new file mode 100644 index 0000000000000..d44e321e13ada --- /dev/null +++ b/discovery/README.md @@ -0,0 +1,42 @@ +# Discovery Service + +A public discovery service using mTLS for authentication and "Universe" isolation, emulating a Kubernetes API. + +## Concept + +- **Universe**: Defined by the SHA256 Fingerprint of a Custom CA Certificate. +- **Client**: Identified by a Client Certificate signed by that Custom CA. +- **DiscoveryEndpoint**: The resource type representing a registered client. +- **Isolation**: Clients can only see `DiscoveryEndpoint` objects signed by the same Custom CA. + +## Usage + +### Run Server + +```bash +go run ./cmd/discovery-server --tls-cert server.crt --tls-key server.key --listen :8443 +``` + +(You can generate a self-signed server certificate for testing, see the [walkthrough](docs/walkthrough.md) ). + +### Client Requirement + +Clients must authenticate using mTLS. +**Important**: The client MUST provide the full certificate chain, including the Root CA, because the server does not have pre-configured trust stores for these custom universes. +The server identifies the Universe from the SHA256 hash of the Root CA certificate found in the TLS chain. + +### Quick start + +See `docs/walkthrough.md` for detailed instructions. + + +## OIDC Discovery + +The discovery server also serves OIDC discovery information publicly, allowing external systems (like AWS IAM) to discover the cluster's identity provider configuration. + +- `GET //.well-known/openid-configuration`: Returns the OIDC discovery document. +- `GET //openid/v1/jwks`: Returns the JWKS. + +This information is populated by clients uploading `DiscoveryEndpoint` resources containing the `oidc` spec. + +## Building and Running \ No newline at end of file diff --git a/discovery/apis/discovery.kops.k8s.io/v1alpha1/types.go b/discovery/apis/discovery.kops.k8s.io/v1alpha1/types.go new file mode 100644 index 0000000000000..b974303f9bdf2 --- /dev/null +++ b/discovery/apis/discovery.kops.k8s.io/v1alpha1/types.go @@ -0,0 +1,75 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var DiscoveryEndpointGVR = schema.GroupVersionResource{ + Group: "discovery.kops.k8s.io", + Version: "v1alpha1", + Resource: "discoveryendpoints", +} + +var DiscoveryEndpointGVK = schema.GroupVersionKind{ + Group: "discovery.kops.k8s.io", + Version: "v1alpha1", + Kind: "DiscoveryEndpoint", +} + +// DiscoveryEndpoint represents a registered client in the discovery service. +type DiscoveryEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DiscoveryEndpointSpec `json:"spec,omitempty"` +} + +// DiscoveryEndpointSpec corresponds to our internal Node data. +type DiscoveryEndpointSpec struct { + Addresses []string `json:"addresses,omitempty"` + LastSeen string `json:"lastSeen,omitempty"` + OIDC *OIDCSpec `json:"oidc,omitempty"` +} + +type OIDCSpec struct { + // IssuerURL string `json:"issuerURL,omitempty"` + Keys []JSONWebKey `json:"keys,omitempty"` +} + +type JSONWebKey struct { + Use string `json:"use,omitempty"` + KeyType string `json:"kty,omitempty"` + KeyID string `json:"kid,omitempty"` + Algorithm string `json:"alg,omitempty"` + N string `json:"n,omitempty"` + E string `json:"e,omitempty"` + // Crv string `json:"crv,omitempty"` + // X string `json:"x,omitempty"` + // Y string `json:"y,omitempty"` +} + +// DiscoveryEndpointList is a list of DiscoveryEndpoint objects. +type DiscoveryEndpointList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // We implement a minimal subset. + Metadata metav1.ListMeta `json:"metadata,omitempty"` + Items []DiscoveryEndpoint `json:"items"` +} diff --git a/discovery/apis/discovery.kops.k8s.io/v1alpha1/universeid.go b/discovery/apis/discovery.kops.k8s.io/v1alpha1/universeid.go new file mode 100644 index 0000000000000..9482a74d30508 --- /dev/null +++ b/discovery/apis/discovery.kops.k8s.io/v1alpha1/universeid.go @@ -0,0 +1,29 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" +) + +func ComputeUniverseIDFromCertificate(cert *x509.Certificate) string { + hash := sha256.Sum256(cert.RawSubjectPublicKeyInfo) + universeID := hex.EncodeToString(hash[:]) + return universeID +} diff --git a/discovery/cmd/discovery-server/main.go b/discovery/cmd/discovery-server/main.go new file mode 100644 index 0000000000000..e8debe6a50324 --- /dev/null +++ b/discovery/cmd/discovery-server/main.go @@ -0,0 +1,74 @@ +/* +Copyright 2025 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "crypto/tls" + "flag" + "fmt" + "log" + "net/http" + "os" + + "k8s.io/kops/discovery/pkg/discovery" +) + +func main() { + certFile := os.Getenv("TLS_CERT") + flag.StringVar(&certFile, "tls-cert", certFile, "Path to server TLS certificate") + + keyFile := os.Getenv("TLS_KEY") + flag.StringVar(&keyFile, "tls-key", keyFile, "Path to server TLS key") + + addr := flag.String("listen", ":8443", "Address to listen on") + storageType := flag.String("storage", "memory", "Storage backend (memory, gcs)") + flag.Parse() + + if certFile == "" || keyFile == "" { + fmt.Fprintf(os.Stderr, "Error: --tls-cert and --tls-key are required\n") + flag.Usage() + os.Exit(1) + } + + var store discovery.Store + + switch *storageType { + case "memory": + store = discovery.NewMemoryStore() + default: + log.Fatalf("Unknown storage type: %s", *storageType) + } + + handler := discovery.NewServer(store) + + tlsConfig := &tls.Config{ + ClientAuth: tls.RequestClientCert, + // We do not set ClientCAs because we accept any CA and use it to define the universe. + MinVersion: tls.VersionTLS12, + } + + server := &http.Server{ + Addr: *addr, + Handler: handler, + TLSConfig: tlsConfig, + } + + log.Printf("Discovery server listening on %s using %s storage", *addr, *storageType) + if err := server.ListenAndServeTLS(certFile, keyFile); err != nil { + log.Fatalf("Server failed: %v", err) + } +} diff --git a/discovery/dev/tasks/deploy-to-k8s b/discovery/dev/tasks/deploy-to-k8s new file mode 100755 index 0000000000000..4701ce3d45160 --- /dev/null +++ b/discovery/dev/tasks/deploy-to-k8s @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Copyright 2025 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -o errexit +set -o nounset +set -o pipefail + +REPO_ROOT="$(git rev-parse --show-toplevel)" +cd "${REPO_ROOT}/discovery" + +if [[ -z "${IMAGE_PREFIX:-}" ]]; then + IMAGE_PREFIX="${USER}/" +fi + +IMAGE_TAG=$(date +%Y%m%d%H%M%S) + +# Build the discovery-server image +VERSION=${IMAGE_TAG} GITSHA=$(git describe --always) KO_DOCKER_REPO="${IMAGE_PREFIX}discovery-server" go run github.com/google/ko@v0.18.0 \ + build --tags "${IMAGE_TAG}" --platform=linux/amd64,linux/arm64 --bare ./cmd/discovery-server/ + +echo "Can install cert-manager with the following command:" +echo "kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.2/cert-manager.yaml" + +kubectl create namespace discovery-k8s-io --dry-run=client -o yaml | kubectl apply -f - + + +cat k8s/manifest.yaml | \ +sed "s|discovery-server:latest|${IMAGE_PREFIX}discovery-server:${IMAGE_TAG}|g" | \ +KUBECTL_APPLYSET=true kubectl apply -n discovery-k8s-io --prune --applyset=discovery-server -f - diff --git a/discovery/docs/walkthrough.md b/discovery/docs/walkthrough.md new file mode 100644 index 0000000000000..1c9f075ac1828 --- /dev/null +++ b/discovery/docs/walkthrough.md @@ -0,0 +1,60 @@ +# Walkthrough of functionality + + +Quick start: + +```bash +# Generate certs, kubeconfig, yaml files +# Check out the script to better understand how all the pieces fit together! +./scripts/create-kubeconfig.sh + +# Start Server (using generated server certs) +go run ./cmd/discovery-server --tls-cert walkthrough/server.crt --tls-key walkthrough/server.key & + +# Verify server is running and serving the DiscoveryEndpoint resource +kubectl --kubeconfig=walkthrough/universe1/client1.kubeconfig api-resources + +# List DiscoveryEndpoints +kubectl --kubeconfig=walkthrough/universe1/client1.kubeconfig get discoveryendpoints --all-namespaces + +# Register (Apply) +# The `metadata.name` MUST match the Common Name (CN) of your client certificate (e.g., `client1`), or the server will reject it with 403 Forbidden. +kubectl --kubeconfig=walkthrough/universe1/client1.kubeconfig apply -f walkthrough/universe1/client1-discoveryendpoint.yaml --server-side=true --validate=false + +# List DiscoveryEndpoints +kubectl --kubeconfig=walkthrough/universe1/client1.kubeconfig get discoveryendpoints --all-namespaces +``` + + +## Using curl + +The kubernetes API is a well-structured REST API, so we don't have to use kubectl. + +If you want to test the API with curl, you must include the **Universe ID** in the URL path. + +**Export the Universe ID:** +```bash +export UNIVERSE_ID=$(openssl x509 -in walkthrough/universe1/ca.crt -noout -fingerprint -sha256 | sed 's/SHA256 Fingerprint=//' | sed 's/://g' | tr '[:upper:]' '[:lower:]') +echo "UNIVERSE_ID is ${UNIVERSE_ID}" +``` + +```bash +curl --cert walkthrough/universe1/client1-bundle.crt --key walkthrough/universe1/client1.key --cacert walkthrough/server.crt \ + "https://localhost:8443/${UNIVERSE_ID}/apis/discovery.kops.k8s.io/v1alpha1/namespaces/default/discoveryendpoints" +``` + +## OIDC discovery + +The goal here is to allow anonymous access to OIDC endpoints, so let's verify that: + +**Getting the OpenID Configuration** +```bash +curl --cacert walkthrough/server.crt "https://localhost:8443/${UNIVERSE_ID}/.well-known/openid-configuration" +``` + +**Getting the JWKS keys** +```bash +curl --cacert walkthrough/server.crt "https://localhost:8443/${UNIVERSE_ID}/openid/v1/jwks" +``` + +Note that we do not need a client certificate to get this data. Data from the DiscoveryEndpoints is published publicly. 
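
For reference, the discovery document is shaped by the `OIDCDiscoveryResponse` struct in `pkg/discovery/oidc.go`, and the JWKS is merged from the keys uploaded via `DiscoveryEndpoint` resources. With the walkthrough's generated `client1-discoveryendpoint.yaml`, the two responses should look roughly like the sketches below; `<UNIVERSE_ID>` is a placeholder for the value exported above, and the host reflects the local test server.

**Example openid-configuration response**
```json
{
  "issuer": "https://localhost:8443/<UNIVERSE_ID>/",
  "jwks_uri": "https://localhost:8443/<UNIVERSE_ID>/openid/v1/jwks",
  "response_types_supported": ["id_token"],
  "subject_types_supported": ["public"],
  "id_token_signing_alg_values_supported": ["RS256"]
}
```

**Example JWKS response**
```json
{
  "keys": [
    {
      "use": "sig",
      "kty": "RSA",
      "kid": "example-key-id",
      "n": "example-modulus",
      "e": "AQAB"
    }
  ]
}
```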
\ No newline at end of file diff --git a/discovery/k8s/manifest.yaml b/discovery/k8s/manifest.yaml new file mode 100644 index 0000000000000..a79c10c326578 --- /dev/null +++ b/discovery/k8s/manifest.yaml @@ -0,0 +1,145 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: discovery-k8s-io + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: discovery-server + namespace: discovery-k8s-io +spec: + replicas: 1 + selector: + matchLabels: + app: discovery-server + template: + metadata: + labels: + app: discovery-server + spec: + containers: + - name: discovery-server + image: discovery-server:latest + env: + - name: TLS_CERT + value: /secrets/tls/tls.crt + - name: TLS_KEY + value: /secrets/tls/tls.key + volumeMounts: + - name: tls + mountPath: /secrets/tls + volumes: + - name: tls + secret: + secretName: discovery-kubedisco-com-tls +--- + +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: letsencrypt-staging + namespace: discovery-k8s-io +spec: + acme: + server: https://acme-staging-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-staging-issuer-account-key + solvers: + - http01: + gatewayHTTPRoute: + parentRefs: + - name: discovery-server-gateway + namespace: discovery-k8s-io + kind: Gateway + +--- + +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: letsencrypt + namespace: discovery-k8s-io +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-prod-issuer-account-key + solvers: + - http01: + gatewayHTTPRoute: + parentRefs: + - name: discovery-server-gateway + namespace: discovery-k8s-io + kind: Gateway + +--- + +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: discovery-server-gateway + namespace: discovery-k8s-io +spec: + gatewayClassName: gari + listeners: + - name: http + protocol: HTTP + port: 80 + allowedRoutes: + namespaces: + from: All + - name: https + protocol: HTTPS + port: 443 + allowedRoutes: + namespaces: + from: All + + +--- + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: discovery-kubedisco-com + namespace: discovery-k8s-io +spec: + dnsNames: + - discovery.kubedisco.com + secretName: discovery-kubedisco-com-tls + issuerRef: + name: letsencrypt + +--- + +apiVersion: v1 +kind: Service +metadata: + name: discovery-server + namespace: discovery-k8s-io +spec: + selector: + app: discovery-server + ports: + - name: https + port: 443 + targetPort: 8443 + type: ClusterIP + +--- + +apiVersion: gateway.networking.k8s.io/v1alpha3 +kind: TLSRoute +metadata: + name: discovery.kubedisco.com + namespace: discovery-k8s-io +spec: + hostnames: + - discovery.kubedisco.com + rules: + - backendRefs: + - name: discovery-server + port: 443 diff --git a/discovery/pkg/discovery/auth.go b/discovery/pkg/discovery/auth.go new file mode 100644 index 0000000000000..95ff9c2de5b09 --- /dev/null +++ b/discovery/pkg/discovery/auth.go @@ -0,0 +1,97 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package discovery + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "fmt" + "net/http" +) + +type UserInfo struct { + UniverseID string + ClientID string +} + +// AuthenticateClientToUniverse extracts the Universe ID and Client ID from the mTLS connection. +// The Universe ID is defined as the SHA256 hash of the root CA certificate (DER bytes) +// presented in the client's certificate chain. +// The Client ID is taken from the Common Name (CN) of the leaf certificate. +func AuthenticateClientToUniverse(r *http.Request, universeID string) (*UserInfo, error) { + if r.TLS == nil { + return nil, fmt.Errorf("no TLS connection") + } + if len(r.TLS.PeerCertificates) == 0 { + return nil, fmt.Errorf("no client certificate presented") + } + + // Verify the chain is valid, though we don't validate that the CA certificate is trusted. + var verifiedChains [][]*x509.Certificate + { + peerCertificates := r.TLS.PeerCertificates + + opts := x509.VerifyOptions{ + Roots: x509.NewCertPool(), + Intermediates: x509.NewCertPool(), + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + + for i := 1; i < len(peerCertificates); i++ { + if i == len(peerCertificates)-1 { + // Last cert is the root + opts.Roots.AddCert(peerCertificates[i]) + } else { + opts.Intermediates.AddCert(peerCertificates[i]) + } + } + + chains, err := peerCertificates[0].Verify(opts) + if err != nil { + return nil, fmt.Errorf("failed to verify client certificate chain: %w", err) + } + verifiedChains = chains + } + + // The universe ID must match at least one of the certificates in the chain (typically the root CA). + var matchingChain []*x509.Certificate + for _, verifiedChain := range verifiedChains { + for _, cert := range verifiedChain { + hash := sha256.Sum256(cert.RawSubjectPublicKeyInfo) + calculatedUniverseID := hex.EncodeToString(hash[:]) + if calculatedUniverseID == universeID { + matchingChain = verifiedChain + break + } + } + } + + if matchingChain == nil { + return nil, fmt.Errorf("client certificate chain does not match universe ID") + } + + clientID := matchingChain[0].Subject.CommonName + if clientID == "" { + return nil, fmt.Errorf("client certificate missing Common Name") + } + + return &UserInfo{ + UniverseID: universeID, + ClientID: clientID, + }, nil +} diff --git a/discovery/pkg/discovery/memory_store.go b/discovery/pkg/discovery/memory_store.go new file mode 100644 index 0000000000000..18a30abfa9c4f --- /dev/null +++ b/discovery/pkg/discovery/memory_store.go @@ -0,0 +1,105 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package discovery + +import ( + "context" + "sync" + "time" + + api "k8s.io/kops/discovery/apis/discovery.kops.k8s.io/v1alpha1" +) + +type MemoryStore struct { + universes map[string]*Universe + mu sync.RWMutex +} + +func NewMemoryStore() *MemoryStore { + return &MemoryStore{ + universes: make(map[string]*Universe), + } +} + +func (s *MemoryStore) getOrCreateUniverse(id string) *Universe { + s.mu.Lock() + defer s.mu.Unlock() + if u, ok := s.universes[id]; ok { + return u + } + u := &Universe{ + ID: id, + DiscoveryEndpoints: make(map[NamespacedName]*api.DiscoveryEndpoint), + } + s.universes[id] = u + return u +} + +func (s *MemoryStore) UpsertDiscoveryEndpoint(ctx context.Context, universeID string, ep *api.DiscoveryEndpoint) error { + u := s.getOrCreateUniverse(universeID) + + u.mu.Lock() + defer u.mu.Unlock() + + // Ensure basic metadata is consistent + ep.TypeMeta.Kind = "DiscoveryEndpoint" + ep.TypeMeta.APIVersion = "discovery.kops.k8s.io/v1alpha1" + + // Update LastSeen + ep.Spec.LastSeen = time.Now().Format(time.RFC3339) + + id := NamespacedName{ + Namespace: ep.ObjectMeta.Namespace, + Name: ep.ObjectMeta.Name, + } + u.DiscoveryEndpoints[id] = ep + return nil +} + +func (s *MemoryStore) ListDiscoveryEndpoints(ctx context.Context, universeID string) ([]*api.DiscoveryEndpoint, error) { + // For listing, we don't necessarily need to create the universe if it doesn't exist, + // but for consistency with getOrCreate logic in memory store, checking existence is enough. + s.mu.RLock() + u, ok := s.universes[universeID] + s.mu.RUnlock() + + if !ok { + return []*api.DiscoveryEndpoint{}, nil + } + + u.mu.RLock() + defer u.mu.RUnlock() + endpoints := make([]*api.DiscoveryEndpoint, 0, len(u.DiscoveryEndpoints)) + for _, n := range u.DiscoveryEndpoints { + endpoints = append(endpoints, n) + } + return endpoints, nil +} + +func (s *MemoryStore) GetDiscoveryEndpoint(ctx context.Context, universeID string, ns, name string) (*api.DiscoveryEndpoint, error) { + s.mu.RLock() + u, ok := s.universes[universeID] + s.mu.RUnlock() + + if !ok { + return nil, nil + } + + u.mu.RLock() + defer u.mu.RUnlock() + return u.DiscoveryEndpoints[NamespacedName{Namespace: ns, Name: name}], nil +} diff --git a/discovery/pkg/discovery/oidc.go b/discovery/pkg/discovery/oidc.go new file mode 100644 index 0000000000000..236480fdd830c --- /dev/null +++ b/discovery/pkg/discovery/oidc.go @@ -0,0 +1,150 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package discovery + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "k8s.io/klog/v2" + api "k8s.io/kops/discovery/apis/discovery.kops.k8s.io/v1alpha1" +) + +func (s *Server) handleOIDCDiscovery(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + log := klog.FromContext(ctx) + + universeID := r.PathValue("universe") + + host := r.Host + if host == "" { + log.Info("Cannot determine host for OIDC discovery") + http.Error(w, "Cannot determine host", http.StatusBadRequest) + return + } + + endpoints, err := s.Store.ListDiscoveryEndpoints(r.Context(), universeID) + if err != nil { + http.Error(w, fmt.Sprintf("Error listing endpoints: %v", err), http.StatusInternalServerError) + return + } + + issuerURL := "https://" + host + "/" + universeID + "/" + + var oidcSpec *api.OIDCSpec + for _, ep := range endpoints { + if ep.Spec.OIDC != nil { + oidcSpec = ep.Spec.OIDC + break + } + } + + if oidcSpec == nil { + http.NotFound(w, r) + return + } + + // Construct minimal OIDC discovery document + jwksURI := issuerURL + if !strings.HasSuffix(jwksURI, "/") { + jwksURI += "/" + } + jwksURI += "openid/v1/jwks" + + resp := OIDCDiscoveryResponse{ + Issuer: issuerURL, + JWKSURI: jwksURI, + ResponseTypesSupported: []string{"id_token"}, + SubjectTypesSupported: []string{"public"}, + IDTokenSigningAlgValuesSupported: []string{"RS256"}, + } + s.writeJSON(w, http.StatusOK, resp) + + log.Info("served OIDC discovery document", "universe", universeID) +} + +func (s *Server) handleOIDCJWKS(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + log := klog.FromContext(ctx) + + universeID := r.PathValue("universe") + + endpoints, err := s.Store.ListDiscoveryEndpoints(r.Context(), universeID) + if err != nil { + log.Error(err, "error listing endpoints") + http.Error(w, fmt.Sprintf("Error listing endpoints: %v", err), http.StatusInternalServerError) + return + } + + type keyInfo struct { + key api.JSONWebKey + lastSeen string + } + keysMap := make(map[string]keyInfo) + + for _, ep := range endpoints { + if ep.Spec.OIDC == nil { + continue + } + for _, key := range ep.Spec.OIDC.Keys { + kid := key.KeyID + if kid == "" { + continue + } + + // Conflict resolution: prefer newest LastSeen + currentLastSeen := ep.Spec.LastSeen + if existing, exists := keysMap[kid]; exists { + if currentLastSeen > existing.lastSeen { + keysMap[kid] = keyInfo{key: key, lastSeen: currentLastSeen} + } + } else { + keysMap[kid] = keyInfo{key: key, lastSeen: currentLastSeen} + } + } + } + + var mergedKeys []api.JSONWebKey + for _, info := range keysMap { + mergedKeys = append(mergedKeys, info.key) + } + + response := map[string]interface{}{ + "keys": mergedKeys, + } + + s.writeJSON(w, http.StatusOK, response) + log.Info("served OIDC JWKS", "universe", universeID) +} + +type OIDCDiscoveryResponse struct { + Issuer string `json:"issuer,omitempty"` + JWKSURI string `json:"jwks_uri,omitempty"` + ResponseTypesSupported []string `json:"response_types_supported,omitempty"` + SubjectTypesSupported []string `json:"subject_types_supported,omitempty"` + IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported,omitempty"` +} + +func (s *Server) writeJSON(w http.ResponseWriter, statusCode int, v interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + if err := json.NewEncoder(w).Encode(v); err != nil { + fmt.Printf("Error encoding response: %v\n", err) + } +} diff --git a/discovery/pkg/discovery/server.go b/discovery/pkg/discovery/server.go new file 
mode 100644 index 0000000000000..289c7740ca270 --- /dev/null +++ b/discovery/pkg/discovery/server.go @@ -0,0 +1,246 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "encoding/json" + "fmt" + "net/http" + + "k8s.io/klog/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/kops/discovery/apis/discovery.kops.k8s.io/v1alpha1" +) + +type Server struct { + Store Store + mux *http.ServeMux +} + +func NewServer(store Store) *Server { + s := &Server{ + Store: store, + mux: http.NewServeMux(), + } + s.registerRoutes() + return s +} + +func (s *Server) registerRoutes() { + // Public OIDC Discovery + s.mux.HandleFunc("GET /{universe}/.well-known/openid-configuration", s.handleOIDCDiscovery) + s.mux.HandleFunc("GET /{universe}/openid/v1/jwks", s.handleOIDCJWKS) + + // Authenticated Routes + // 1. Root Discovery (/apis) + s.mux.HandleFunc("GET /{universe}/apis", s.withAuth(s.handleAPIGroupList)) + + // Discovering resources in group + s.mux.HandleFunc("GET /{universe}/apis/discovery.kops.k8s.io/v1alpha1", s.withAuth(s.handleAPIResourceList)) + + // Listing DiscoveryEndpoints (All namespaces) + s.mux.HandleFunc("GET /{universe}/apis/discovery.kops.k8s.io/v1alpha1/discoveryendpoints", s.withAuth(s.handleListDiscoveryEndpoints)) + + // Listing DiscoveryEndpoints (Specific namespace) + s.mux.HandleFunc("GET /{universe}/apis/discovery.kops.k8s.io/v1alpha1/namespaces/{namespace}/discoveryendpoints", s.withAuth(s.handleListDiscoveryEndpoints)) + + // Create DiscoveryEndpoint + s.mux.HandleFunc("POST /{universe}/apis/discovery.kops.k8s.io/v1alpha1/namespaces/{namespace}/discoveryendpoints", s.withAuth(s.handleCreateDiscoveryEndpoint)) + + // Get DiscoveryEndpoint + s.mux.HandleFunc("GET /{universe}/apis/discovery.kops.k8s.io/v1alpha1/namespaces/{namespace}/discoveryendpoints/{name}", s.withAuth(s.handleGetDiscoveryEndpoint)) + + // Apply (Patch) DiscoveryEndpoint + s.mux.HandleFunc("PATCH /{universe}/apis/discovery.kops.k8s.io/v1alpha1/namespaces/{namespace}/discoveryendpoints/{name}", s.withAuth(s.handleApplyDiscoveryEndpoint)) +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.mux.ServeHTTP(w, r) +} + +func (s *Server) withAuth(next func(http.ResponseWriter, *http.Request, *UserInfo)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + universeID := r.PathValue("universe") + userInfo, err := AuthenticateClientToUniverse(r, universeID) + if err != nil { + klog.Warningf("Unauthorized access attempt to universe %s: %v", universeID, err) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + next(w, r, userInfo) + } +} + +// Handlers + +func (s *Server) handleAPIGroupList(w http.ResponseWriter, r *http.Request, _ *UserInfo) { + resp := metav1.APIGroupList{ + TypeMeta: metav1.TypeMeta{Kind: "APIGroupList", APIVersion: "v1"}, + Groups: []metav1.APIGroup{ + { + Name: "discovery.kops.k8s.io", + Versions: []metav1.GroupVersionForDiscovery{ + {GroupVersion: 
"discovery.kops.k8s.io/v1alpha1", Version: "v1alpha1"}, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: "discovery.kops.k8s.io/v1alpha1", + Version: "v1alpha1", + }, + }, + }, + } + s.writeJSON(w, http.StatusOK, resp) +} + +func (s *Server) handleAPIResourceList(w http.ResponseWriter, r *http.Request, _ *UserInfo) { + resp := metav1.APIResourceList{ + TypeMeta: metav1.TypeMeta{Kind: "APIResourceList", APIVersion: "v1"}, + GroupVersion: "discovery.kops.k8s.io/v1alpha1", + APIResources: []metav1.APIResource{ + { + Name: "discoveryendpoints", + SingularName: "discoveryendpoint", + Namespaced: true, + Kind: "DiscoveryEndpoint", + Verbs: []string{"get", "list", "create", "update", "patch"}, + }, + }, + } + s.writeJSON(w, http.StatusOK, resp) +} + +func (s *Server) handleListDiscoveryEndpoints(w http.ResponseWriter, r *http.Request, _ *UserInfo) { + universeID := r.PathValue("universe") + ns := r.PathValue("namespace") // Empty if not in path + + endpoints, err := s.Store.ListDiscoveryEndpoints(r.Context(), universeID) + if err != nil { + http.Error(w, fmt.Sprintf("Error listing endpoints: %v", err), http.StatusInternalServerError) + return + } + + resp := api.DiscoveryEndpointList{ + TypeMeta: metav1.TypeMeta{Kind: "DiscoveryEndpointList", APIVersion: "discovery.kops.k8s.io/v1alpha1"}, + } + + for _, ep := range endpoints { + if ns == "" || ep.ObjectMeta.Namespace == ns { + resp.Items = append(resp.Items, *ep) + } + } + + s.writeJSON(w, http.StatusOK, resp) +} + +func (s *Server) handleCreateDiscoveryEndpoint(w http.ResponseWriter, r *http.Request, userInfo *UserInfo) { + universeID := r.PathValue("universe") + ns := r.PathValue("namespace") + + var input api.DiscoveryEndpoint + if err := json.NewDecoder(r.Body).Decode(&input); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + // Validation: ensure the name matches the clientID from the cert + if input.ObjectMeta.Name != "" && input.ObjectMeta.Name != userInfo.ClientID { + http.Error(w, fmt.Sprintf("Forbidden: cannot register node name '%s' with client cert '%s'", input.ObjectMeta.Name, userInfo.ClientID), http.StatusForbidden) + return + } + + // Validation: ensure the namespace in body matches the URL + if input.ObjectMeta.Namespace != ns { + http.Error(w, "Forbidden: namespace does not match", http.StatusForbidden) + return + } + + if err := s.Store.UpsertDiscoveryEndpoint(r.Context(), universeID, &input); err != nil { + http.Error(w, fmt.Sprintf("Error creating endpoint: %v", err), http.StatusInternalServerError) + return + } + + // Return the created object + s.writeJSON(w, http.StatusCreated, input) +} + +func (s *Server) handleApplyDiscoveryEndpoint(w http.ResponseWriter, r *http.Request, userInfo *UserInfo) { + ctx := r.Context() + log := klog.FromContext(ctx) + + universeID := r.PathValue("universe") + ns := r.PathValue("namespace") + name := r.PathValue("name") + + var input api.DiscoveryEndpoint + if err := json.NewDecoder(r.Body).Decode(&input); err != nil { + log.Info("invalid request body", "error", err) + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + // Validation: ensure the name matches the clientID from the cert + if input.ObjectMeta.Name != "" && input.ObjectMeta.Name != userInfo.ClientID { + log.Info("Forbidden: cannot register node name", "name", input.ObjectMeta.Name, "clientID", userInfo.ClientID) + http.Error(w, fmt.Sprintf("Forbidden: cannot register node name '%s' with client cert '%s'", input.ObjectMeta.Name, 
userInfo.ClientID), http.StatusForbidden) + return + } + + // Validation: ensure the name in body matches the URL + if input.ObjectMeta.Name != name { + log.Info("Forbidden: name does not match", "name", input.ObjectMeta.Name, "expected", name) + http.Error(w, "Forbidden: name does not match", http.StatusForbidden) + return + } + + // Validation: ensure the namespace in body matches the URL + if input.ObjectMeta.Namespace != ns { + log.Info("Forbidden: namespace does not match", "namespace", input.ObjectMeta.Namespace, "expected", ns) + http.Error(w, "Forbidden: namespace does not match", http.StatusForbidden) + return + } + + if err := s.Store.UpsertDiscoveryEndpoint(r.Context(), universeID, &input); err != nil { + log.Error(err, "error applying endpoint") + http.Error(w, fmt.Sprintf("Error applying endpoint: %v", err), http.StatusInternalServerError) + return + } + + // Return the created object + s.writeJSON(w, http.StatusCreated, input) + + log.Info("Applied endpoint", "namespace", input.ObjectMeta.Namespace, "name", input.ObjectMeta.Name, "universe", universeID) +} + +func (s *Server) handleGetDiscoveryEndpoint(w http.ResponseWriter, r *http.Request, _ *UserInfo) { + universeID := r.PathValue("universe") + ns := r.PathValue("namespace") + name := r.PathValue("name") + + found, err := s.Store.GetDiscoveryEndpoint(r.Context(), universeID, ns, name) + if err != nil { + http.Error(w, fmt.Sprintf("Error getting endpoint: %v", err), http.StatusInternalServerError) + return + } + + if found == nil { + http.NotFound(w, r) + return + } + + s.writeJSON(w, http.StatusOK, found) +} diff --git a/discovery/pkg/discovery/server_test.go b/discovery/pkg/discovery/server_test.go new file mode 100644 index 0000000000000..f93cd9acd5346 --- /dev/null +++ b/discovery/pkg/discovery/server_test.go @@ -0,0 +1,545 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package discovery + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + api "k8s.io/kops/discovery/apis/discovery.kops.k8s.io/v1alpha1" +) + +// ServerHarness wraps the test server setup +type ServerHarness struct { + Store Store + Server *httptest.Server + ServerURL string +} + +func NewServerHarness() *ServerHarness { + store := NewMemoryStore() + handler := NewServer(store) + server := httptest.NewUnstartedServer(handler) + server.TLS = &tls.Config{ + ClientAuth: tls.RequestClientCert, + } + server.StartTLS() + + return &ServerHarness{ + Store: store, + Server: server, + ServerURL: server.URL, + } +} + +func (h *ServerHarness) Close() { + h.Server.Close() +} + +func (h *ServerHarness) Certificate() *x509.Certificate { + return h.Server.TLS.Certificates[0].Leaf +} + +func (h *ServerHarness) NewUniverse(t *testing.T, name string) *UniverseHarness { + ca, caKey, err := generateCA(name) + if err != nil { + t.Fatalf("Failed to generate CA %s: %v", name, err) + } + hash := sha256.Sum256(ca.RawSubjectPublicKeyInfo) + id := hex.EncodeToString(hash[:]) + + return &UniverseHarness{ + T: t, + Server: h, + CA: ca, + CAKey: caKey, + ID: id, + } +} + +func generateCA(cn string) (*x509.Certificate, *rsa.PrivateKey, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + template := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: cn}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, + BasicConstraintsValid: true, + } + der, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key) + if err != nil { + return nil, nil, err + } + cert, err := x509.ParseCertificate(der) + return cert, key, err +} + +// UniverseHarness wraps a Universe (CA) context +type UniverseHarness struct { + T *testing.T + + Server *ServerHarness + CA *x509.Certificate + CAKey *rsa.PrivateKey + ID string +} + +// ClientHarness wraps the client-side logic +type ClientHarness struct { + KubeClient dynamic.Interface + HTTPClient *http.Client + UniverseID string + ServerURL string + Name string // Client Identity (CN) +} + +func pemEncodeCerts(certs ...*x509.Certificate) []byte { + var b bytes.Buffer + for _, cert := range certs { + pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + } + return b.Bytes() +} + +func pemEncodeKey(key *rsa.PrivateKey) []byte { + b := x509.MarshalPKCS1PrivateKey(key) + return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: b}) +} + +func (h *UniverseHarness) NewClient(clientName string) *ClientHarness { + clientCert, clientKey, err := generateClientCert(clientName, h.CA, h.CAKey) + if err != nil { + h.T.Fatalf("failed to generate client cert: %v", err) + } + + hash := sha256.Sum256(h.CA.RawSubjectPublicKeyInfo) + universeID := hex.EncodeToString(hash[:]) + + // 1. 
Configure Dynamic Client (k8s protocol) + config := &rest.Config{ + Host: h.Server.ServerURL + "/" + universeID, + TLSClientConfig: rest.TLSClientConfig{ + CAData: pemEncodeCerts(h.Server.Certificate()), + CertData: pemEncodeCerts(clientCert, h.CA), + KeyData: pemEncodeKey(clientKey), + }, + } + kubeClient, err := dynamic.NewForConfig(config) + if err != nil { + h.T.Fatalf("failed to create dynamic client: %v", err) + } + + // 2. Configure HTTP Client (for OIDC/raw requests) + tlsCert := tls.Certificate{ + Certificate: [][]byte{clientCert.Raw, h.CA.Raw}, + PrivateKey: clientKey, + } + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: func() *x509.CertPool { + pool := x509.NewCertPool() + pool.AddCert(h.Server.Certificate()) + return pool + }(), + Certificates: []tls.Certificate{tlsCert}, + }, + }, + } + + return &ClientHarness{ + KubeClient: kubeClient, + HTTPClient: httpClient, + UniverseID: universeID, + ServerURL: h.Server.ServerURL, + Name: clientName, + } +} + +func generateClientCert(cn string, ca *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + template := &x509.Certificate{ + SerialNumber: big.NewInt(2), // Randomize if needed + Subject: pkix.Name{CommonName: cn}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + der, err := x509.CreateCertificate(rand.Reader, template, ca, &key.PublicKey, caKey) + if err != nil { + return nil, nil, err + } + cert, err := x509.ParseCertificate(der) + return cert, key, err +} + +// NewAnonymousClient creates a client without client certs (for public endpoints) +func (h *UniverseHarness) NewAnonymousClient(serverURL string) *ClientHarness { + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: func() *x509.CertPool { + pool := x509.NewCertPool() + pool.AddCert(h.Server.Certificate()) + return pool + }(), + }, + }, + } + return &ClientHarness{ + HTTPClient: client, + UniverseID: h.ID, + ServerURL: serverURL, + Name: "public", + } +} + +func (c *ClientHarness) Get(path string) (*http.Response, error) { + url := fmt.Sprintf("%s/%s/%s", c.ServerURL, c.UniverseID, path) + return c.HTTPClient.Get(url) +} + +// RegisterDiscoveryEndpoint uses the dynamic client to create/apply the endpoint +func (c *ClientHarness) RegisterDiscoveryEndpoint(ns string, spec api.DiscoveryEndpointSpec) (*api.DiscoveryEndpoint, error) { + ep := &api.DiscoveryEndpoint{ + Spec: spec, + } + + ep.Kind = "DiscoveryEndpoint" + ep.APIVersion = "discovery.kops.k8s.io/v1alpha1" + + ep.Name = c.Name + ep.Namespace = ns + + // Convert to Unstructured + uContent, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ep) + if err != nil { + return nil, fmt.Errorf("failed to convert to unstructured: %v", err) + } + u := &unstructured.Unstructured{Object: uContent} + + gvr := api.DiscoveryEndpointGVR + + // Use Server-Side Apply to Create/Update + // Note: We use "Apply" to mimic kubectl apply --server-side + // But standard Create is also fine. Let's use Create for Registration. + // Update: The previous test used POST (Create). 
+ created, err := c.KubeClient.Resource(gvr).Namespace(ns).Create(context.Background(), u, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + var result api.DiscoveryEndpoint + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(created.Object, &result); err != nil { + return nil, fmt.Errorf("failed to convert from unstructured: %v", err) + } + return &result, nil +} + +func (c *ClientHarness) ListDiscoveryEndpoints(ns string) (*api.DiscoveryEndpointList, error) { + gvr := api.DiscoveryEndpointGVR + + list, err := c.KubeClient.Resource(gvr).Namespace(ns).List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + result := &api.DiscoveryEndpointList{ + Items: make([]api.DiscoveryEndpoint, len(list.Items)), + } + for i, item := range list.Items { + var ep api.DiscoveryEndpoint + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(item.Object, &ep); err != nil { + return nil, fmt.Errorf("failed to convert item %d: %v", i, err) + } + result.Items[i] = ep + } + return result, nil +} + +// Tests + +func TestDiscoveryIsolation(t *testing.T) { + h := NewServerHarness() + defer h.Close() + + // 1. Setup Universe 1 + u1 := h.NewUniverse(t, "Universe 1 CA") + client1 := u1.NewClient("client1") + + // 2. Setup Universe 2 + u2 := h.NewUniverse(t, "Universe 2 CA") + client2 := u2.NewClient("client2") + + // Client 1 Registers + _, err := client1.RegisterDiscoveryEndpoint("default", api.DiscoveryEndpointSpec{Addresses: []string{"1.2.3.4"}}) + if err != nil { + t.Fatalf("Client 1 Register failed: %v", err) + } + + // Client 2 Registers + _, err = client2.RegisterDiscoveryEndpoint("default", api.DiscoveryEndpointSpec{Addresses: []string{"5.6.7.8"}}) + if err != nil { + t.Fatalf("Client 2 Register failed: %v", err) + } + + // Client 1 Lists - Should see only Client 1 + list1, err := client1.ListDiscoveryEndpoints("default") + if err != nil { + t.Fatalf("Client 1 List failed: %v", err) + } + if len(list1.Items) != 1 { + t.Errorf("Client 1 should see 1 node, saw %d", len(list1.Items)) + } else if list1.Items[0].ObjectMeta.Name != "client1" { + t.Errorf("Client 1 saw wrong node: %s", list1.Items[0].ObjectMeta.Name) + } + + // Client 2 Lists - Should see only Client 2 + list2, err := client2.ListDiscoveryEndpoints("default") + if err != nil { + t.Fatalf("Client 2 List failed: %v", err) + } + if len(list2.Items) != 1 { + t.Errorf("Client 2 should see 1 node, saw %d", len(list2.Items)) + } else if list2.Items[0].ObjectMeta.Name != "client2" { + t.Errorf("Client 2 saw wrong node: %s", list2.Items[0].ObjectMeta.Name) + } +} + +func TestOIDCDiscovery(t *testing.T) { + h := NewServerHarness() + defer h.Close() + + u := h.NewUniverse(t, "Universe CA") + client := u.NewClient("client1") + + // Register Endpoint with OIDC + jwksKey := api.JSONWebKey{KeyType: "RSA", KeyID: "1"} + + _, err := client.RegisterDiscoveryEndpoint("default", api.DiscoveryEndpointSpec{ + Addresses: []string{"1.2.3.4"}, + OIDC: &api.OIDCSpec{ + Keys: []api.JSONWebKey{jwksKey}, + }, + }) + if err != nil { + t.Fatalf("Register failed: %v", err) + } + + // Query Public OIDC Endpoint + anonymousClient := u.NewAnonymousClient(h.ServerURL) + + // 1. 
OIDC Discovery + resp, err := anonymousClient.Get(".well-known/openid-configuration") + if err != nil { + t.Fatalf("Get OIDC failed: %v", err) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("Get OIDC status: %v", resp.Status) + } + defer resp.Body.Close() + + var oidcResp map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&oidcResp); err != nil { + t.Fatalf("Decode OIDC failed: %v", err) + } + oidcIssuer := anonymousClient.ServerURL + "/" + anonymousClient.UniverseID + "/" + if oidcResp["issuer"] != oidcIssuer { + t.Errorf("Expected issuer %s, got %s", oidcIssuer, oidcResp["issuer"]) + } + expectedJWKSURI := oidcIssuer + "openid/v1/jwks" + if oidcResp["jwks_uri"] != expectedJWKSURI { + t.Errorf("Expected jwks_uri %s, got %s", expectedJWKSURI, oidcResp["jwks_uri"]) + } + + // 2. JWKS + resp, err = anonymousClient.Get("openid/v1/jwks") + if err != nil { + t.Fatalf("Get JWKS failed: %v", err) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("Get JWKS status: %v", resp.Status) + } + defer resp.Body.Close() + + var jwksResp map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&jwksResp); err != nil { + t.Fatalf("Decode JWKS failed: %v", err) + } + keys := jwksResp["keys"].([]interface{}) + if len(keys) != 1 { + t.Errorf("Expected 1 key, got %d", len(keys)) + } else { + key1 := keys[0].(map[string]interface{}) + if key1["kid"] != "1" { + t.Errorf("Expected kid 1, got %v", key1["kid"]) + } + } +} + +func TestOIDCMerging(t *testing.T) { + h := NewServerHarness() + defer h.Close() + + u := h.NewUniverse(t, "Universe CA") + + // Helper to register endpoint + register := func(name string, keys []api.JSONWebKey) { + client := u.NewClient(name) + _, err := client.RegisterDiscoveryEndpoint("default", api.DiscoveryEndpointSpec{ + OIDC: &api.OIDCSpec{ + Keys: keys, + }, + }) + if err != nil { + t.Fatalf("Register %s failed: %v", name, err) + } + } + + // Scenario: + // Client A: Has Key 1 (old) and Key 2 (new) + // Client B: Has Key 1 (newer) + + key1Old := api.JSONWebKey{KeyID: "1", N: "old"} + key1New := api.JSONWebKey{KeyID: "1", N: "new"} + key2 := api.JSONWebKey{KeyID: "2", N: "static"} + + register("clientA", []api.JSONWebKey{key1Old, key2}) + // Ensure time passes so LastSeen is updated + time.Sleep(2 * time.Second) + register("clientB", []api.JSONWebKey{key1New}) + + // Query JWKS using a public client harness + anonymousClient := u.NewAnonymousClient(h.ServerURL) + + resp, err := anonymousClient.Get("openid/v1/jwks") + if err != nil { + t.Fatalf("Get JWKS failed: %v", err) + } + defer resp.Body.Close() + + var jwksResp map[string]interface{} + json.NewDecoder(resp.Body).Decode(&jwksResp) + + keys := jwksResp["keys"].([]interface{}) + if len(keys) != 2 { + t.Errorf("Expected 2 keys, got %d", len(keys)) + } + + foundNew := false + for _, k := range keys { + km := k.(map[string]interface{}) + if km["kid"] == "1" { + if km["n"] == "new" { + foundNew = true + } else { + t.Errorf("Expected key 1 to be 'new', got '%v'", km["n"]) + } + } + } + if !foundNew { + t.Error("Key 1 not found") + } +} + +func TestServerSideApply(t *testing.T) { + h := NewServerHarness() + defer h.Close() + + u := h.NewUniverse(t, "Universe CA") + client := u.NewClient("client1") + + // 1. 
Initial Create (using Register helper which uses dynamic Create) + ep, err := client.RegisterDiscoveryEndpoint("default", api.DiscoveryEndpointSpec{Addresses: []string{"1.2.3.4"}}) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + if len(ep.Spec.Addresses) != 1 || ep.Spec.Addresses[0] != "1.2.3.4" { + t.Errorf("Unexpected addresses: %v", ep.Spec.Addresses) + } + + // 2. Patch (Apply) - Update addresses using Patch with ApplyPatchType + ep.Spec.Addresses = []string{"5.6.7.8"} + // Ensure TypeMeta is set for conversion/application + ep.TypeMeta.Kind = "DiscoveryEndpoint" + ep.TypeMeta.APIVersion = "discovery.kops.k8s.io/v1alpha1" + + uContent, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ep) + if err != nil { + t.Fatalf("Failed to convert to unstructured: %v", err) + } + + patchData, err := json.Marshal(uContent) + if err != nil { + t.Fatalf("Failed to marshal patch data: %v", err) + } + + gvr := api.DiscoveryEndpointGVR + + // Use server-side apply patch + patchOpts := metav1.PatchOptions{ + FieldManager: "client1", + } + + _, err = client.KubeClient.Resource(gvr).Namespace("default").Patch(context.Background(), client.Name, types.ApplyPatchType, patchData, patchOpts) + if err != nil { + t.Fatalf("Patch failed: %v", err) + } + + // 3. Verify Update + list, err := client.ListDiscoveryEndpoints("default") + if err != nil { + t.Fatalf("List failed: %v", err) + } + if len(list.Items) == 0 { + t.Fatalf("List returned no items") + } + if list.Items[0].Spec.Addresses[0] != "5.6.7.8" { + t.Errorf("Patch did not update addresses. Got: %v", list.Items[0].Spec.Addresses) + } +} diff --git a/discovery/pkg/discovery/store.go b/discovery/pkg/discovery/store.go new file mode 100644 index 0000000000000..815c195c1dcb8 --- /dev/null +++ b/discovery/pkg/discovery/store.go @@ -0,0 +1,48 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "context" + "sync" + + api "k8s.io/kops/discovery/apis/discovery.kops.k8s.io/v1alpha1" +) + +type NamespacedName struct { + Namespace string + Name string +} + +// Universe represents an isolated scope defined by a CA Public Key. +type Universe struct { + ID string `json:"id"` + DiscoveryEndpoints map[NamespacedName]*api.DiscoveryEndpoint `json:"endpoints"` + mu sync.RWMutex +} + +// Store is the interface for storage backends. 
+type Store interface { + // UpsertDiscoveryEndpoint creates or updates an endpoint in a specific universe + UpsertDiscoveryEndpoint(ctx context.Context, universeID string, ep *api.DiscoveryEndpoint) error + + // ListDiscoveryEndpoints lists all endpoints in a specific universe + ListDiscoveryEndpoints(ctx context.Context, universeID string) ([]*api.DiscoveryEndpoint, error) + + // GetDiscoveryEndpoint retrieves a specific endpoint + GetDiscoveryEndpoint(ctx context.Context, universeID string, ns, name string) (*api.DiscoveryEndpoint, error) +} diff --git a/discovery/scripts/create-kubeconfig.sh b/discovery/scripts/create-kubeconfig.sh new file mode 100755 index 0000000000000..df712266fa38a --- /dev/null +++ b/discovery/scripts/create-kubeconfig.sh @@ -0,0 +1,153 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +CERTS_DIR="walkthrough" +SERVER_KEY="${CERTS_DIR}/server.key" +SERVER_CRT="${CERTS_DIR}/server.crt" +UNIVERSE_CA_KEY="${CERTS_DIR}/universe1/ca.key" +UNIVERSE_CA_CRT="${CERTS_DIR}/universe1/ca.crt" +CLIENT_KEY="${CERTS_DIR}/universe1/client1.key" +CLIENT_CRT="${CERTS_DIR}/universe1/client1.crt" +CLIENT_BUNDLE="${CERTS_DIR}/universe1/client1-bundle.crt" +CLIENT_KUBECONFIG="${CERTS_DIR}/universe1/client1.kubeconfig" + +mkdir -p "$CERTS_DIR/universe1" + +# 1. Generate Server Certificate with SANs (if missing) +if [ ! -f "$SERVER_CRT" ] || [ ! -f "$SERVER_KEY" ]; then + echo "Generating Server Certificate with SANs..." + + # Create OpenSSL config for SANs + cat > "${CERTS_DIR}/server.conf" < "$CLIENT_BUNDLE" + rm "${CERTS_DIR}/client1.csr" + echo "Created $CLIENT_BUNDLE" +fi + +# Resolve absolute paths for kubeconfig +# (Function to handle macOS/Linux differences for readlink) +get_abs_path() { + echo "$(cd "$(dirname "$1")"; pwd)/$(basename "$1")" +} + +ABS_SERVER_CRT=$(get_abs_path "$SERVER_CRT") +ABS_CLIENT_BUNDLE=$(get_abs_path "$CLIENT_BUNDLE") +ABS_CLIENT_KEY=$(get_abs_path "$CLIENT_KEY") + +# 4. Generate Kubeconfig +UNIVERSE_ID=$(openssl x509 -in "$UNIVERSE_CA_CRT" -noout -fingerprint -sha256 | sed 's/SHA256 Fingerprint=//' | sed 's/://g' | tr '[:upper:]' '[:lower:]') +echo "Universe ID: ${UNIVERSE_ID}" + +cat > "$CLIENT_KUBECONFIG" < "${CERTS_DIR}/universe1/client1-discoveryendpoint.yaml" +apiVersion: discovery.kops.k8s.io/v1alpha1 +kind: DiscoveryEndpoint +metadata: + name: client1 +spec: + addresses: + - 10.0.0.1 + oidc: + issuerURL: https://issuer.example.com + keys: + - kty: RSA + kid: example-key-id + use: sig + n: example-modulus + e: AQAB +EOF + +# 6. Output Instructions +echo "Generated $OUTPUT" +echo "" +echo "To use:" +echo "1. Run server: ./discovery-server --tls-cert $SERVER_CRT --tls-key $SERVER_KEY" +echo "2. 
Run kubectl: kubectl --kubeconfig=$OUTPUT get discoveryendpoints" diff --git a/hack/dev-push-kops-controller.sh b/hack/dev-push-kops-controller.sh index 34033e6855dc5..8a0e97e02f8fa 100755 --- a/hack/dev-push-kops-controller.sh +++ b/hack/dev-push-kops-controller.sh @@ -27,7 +27,7 @@ fi IMAGE_TAG=$(date +%Y%m%d%H%M%S) # Build the controller image -KO_DOCKER_REPO="${IMAGE_PREFIX}kops-controller" go run github.com/google/ko@v0.14.1 \ +KO_DOCKER_REPO="${IMAGE_PREFIX}kops-controller" go run github.com/google/ko@v0.18.0 \ build --tags "${IMAGE_TAG}" --platform=linux/amd64,linux/arm64 --bare ./cmd/kops-controller/ # Update the image and bounce the pods diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index a6288698088a3..767512abb8471 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -6380,6 +6380,15 @@ spec: items: type: string type: array + discoveryService: + description: DiscoveryService configures discovery using a hosted + discovery service. + properties: + url: + description: URL is the base URL of the discovery service, + including universe ID if applicable. + type: string + type: object discoveryStore: description: DiscoveryStore is the VFS path to where OIDC Issuer Discovery metadata is stored. diff --git a/nodeup/pkg/model/context.go b/nodeup/pkg/model/context.go index 349c937f545e8..cd495f7df6fa6 100644 --- a/nodeup/pkg/model/context.go +++ b/nodeup/pkg/model/context.go @@ -72,6 +72,9 @@ type NodeupModelContext struct { // usesNoneDNS is true if the cluster runs with dns=none (which uses fixed IPs, for example a load balancer, instead of DNS) usesNoneDNS bool + // discoveryService implements discovery using a hosted discovery service. + discoveryService *nodeup.DiscoveryServiceOptions + // Deprecated: This should be renamed to controlPlaneVersion / nodeVersion; // controlPlaneVersion should probably/ideally only be populated on control plane nodes. kubernetesVersion *kopsmodel.KubernetesVersion @@ -107,6 +110,7 @@ func (c *NodeupModelContext) Init() error { c.usesNoneDNS = c.NodeupConfig.UsesNoneDNS c.usesLegacyGossip = c.NodeupConfig.UsesLegacyGossip + c.discoveryService = c.NodeupConfig.DiscoveryService return nil } @@ -521,6 +525,12 @@ func (c *NodeupModelContext) NodeName() (string, error) { return strings.ToLower(strings.TrimSpace(nodeName)), nil } +// ClusterName returns the name of the cluster, as it is registered in kOps +// (Note this name may include dots, which are not valid in many k8s objects) +func (c *NodeupModelContext) ClusterName() string { + return c.BootConfig.ClusterName +} + func (b *NodeupModelContext) AddCNIBinAssets(c *fi.NodeupModelBuilderContext) error { f := b.Assets.FindMatches(regexp.MustCompile(".*")) @@ -652,6 +662,11 @@ func (c *NodeupModelContext) UsesNoneDNS() bool { return c.usesNoneDNS } +// DiscoveryServiceOptions returns the configuration for discovery service to register with, or nil if not using a discovery service. +func (c *NodeupModelContext) DiscoveryServiceOptions() *nodeup.DiscoveryServiceOptions { + return c.discoveryService +} + func (c *NodeupModelContext) PublishesDNSRecords() bool { if c.UsesLegacyGossip() || c.UsesNoneDNS() { return false diff --git a/nodeup/pkg/model/discovery_service.go b/nodeup/pkg/model/discovery_service.go new file mode 100644 index 0000000000000..d443c622961b6 --- /dev/null +++ b/nodeup/pkg/model/discovery_service.go @@ -0,0 +1,170 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package model + +import ( + "bytes" + "context" + "crypto/rsa" + "encoding/base64" + "encoding/binary" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks" +) + +// DiscoveryService registers with the discovery service. +type DiscoveryService struct { + *NodeupModelContext +} + +var _ fi.NodeupModelBuilder = &DiscoveryService{} + +// Build is responsible for configuring the discovery service registration tasks. +func (b *DiscoveryService) Build(c *fi.NodeupModelBuilderContext) error { + ctx := c.Context() + + if !b.IsMaster { + return nil + } + discoveryServiceOptions := b.DiscoveryServiceOptions() + if discoveryServiceOptions == nil { + return nil + } + + nodeName, err := b.NodeName() + if err != nil { + return err + } + + // TODO: Currently we enforce that the node name must match the certificate CN, but ... it probably doesn't matter. + // certificateName := nodeName + "." + b.ClusterName() + certificateName := nodeName + + namespace := strings.ReplaceAll(b.ClusterName(), ".", "-") + id := types.NamespacedName{ + Namespace: namespace, + Name: nodeName, + } + + issueCert := &nodetasks.IssueCert{ + Name: "discovery-service-client", + Signer: fi.DiscoveryCAID, + KeypairID: b.NodeupConfig.KeypairIDs[fi.DiscoveryCAID], + Type: "client", + Subject: nodetasks.PKIXName{ + CommonName: certificateName, + }, + AlternateNames: []string{certificateName}, + } + c.AddTask(issueCert) + + certResource, keyResource, caResource := issueCert.GetResources() + + jwks, err := findJWKSForServiceAccount(ctx, b.NodeupConfig.KeypairIDs, b.KeyStore) + if err != nil { + return err + } + registerTask := &nodetasks.DiscoveryServiceRegisterTask{ + Name: "register", + DiscoveryService: discoveryServiceOptions.URL, + ClientCert: certResource, + ClientKey: keyResource, + ClientCA: caResource, + RegisterName: id.Name, + RegisterNamespace: id.Namespace, + JWKS: jwks, + } + c.AddTask(registerTask) + + return nil +} + +func findJWKSForServiceAccount(ctx context.Context, keypairIDs map[string]string, keystore fi.KeystoreReader) ([]nodetasks.JSONWebKey, error) { + var jwks []nodetasks.JSONWebKey + + name := "service-account" + keypairID := keypairIDs[name] + if keypairID == "" { + // kOps bug where KeypairID was not populated for the node role. 
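+		// This should only happen if the nodeup config was built without the
+		// service-account keypair; fail fast rather than registering without keys.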
+ return nil, fmt.Errorf("no keypair ID for %q", name) + } + + keyset, err := keystore.FindKeyset(ctx, name) + if err != nil { + return nil, err + } + if keyset == nil { + return nil, fmt.Errorf("keyset %q not found", name) + } + + for _, item := range keyset.Items { + if item.DistrustTimestamp != nil { + continue + } + if item.Certificate == nil || item.Certificate.Subject.CommonName != "service-account" { + continue + } + + publicKey := item.Certificate.PublicKey + + jwk := nodetasks.JSONWebKey{} + + { + jwk.KeyID = item.Id + // publicKeyDERBytes, err := x509.MarshalPKIXPublicKey(publicKey) + // if err != nil { + // return nil, fmt.Errorf("failed to serialize public key to DER format: %v", err) + // } + + // hasher := crypto.SHA256.New() + // hasher.Write(publicKeyDERBytes) + // publicKeyDERHash := hasher.Sum(nil) + + // jwk.KeyID = base64.RawURLEncoding.EncodeToString(publicKeyDERHash) + } + + switch publicKey := publicKey.(type) { + case *rsa.PublicKey: + jwk.Algorithm = "RS256" + jwk.Use = "sig" + jwk.N = base64.RawURLEncoding.EncodeToString(publicKey.N.Bytes()) + jwk.E = base64.RawURLEncoding.EncodeToString(uint64ToBytes(uint64(publicKey.E))) + jwk.KeyType = "RSA" + + default: + return nil, fmt.Errorf("unsupported public key type for service-account: %T", publicKey) + } + + jwks = append(jwks, jwk) + } + sort.Slice(jwks, func(i, j int) bool { + return jwks[i].KeyID < jwks[j].KeyID + }) + + return jwks, nil +} + +func uint64ToBytes(n uint64) []byte { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, n) + return bytes.TrimLeft(data, "\x00") +} diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go index fdc401c031070..0fb0f12d55a9d 100644 --- a/pkg/apis/kops/cluster.go +++ b/pkg/apis/kops/cluster.go @@ -280,12 +280,21 @@ type KarpenterConfig struct { type ServiceAccountIssuerDiscoveryConfig struct { // DiscoveryStore is the VFS path to where OIDC Issuer Discovery metadata is stored. DiscoveryStore string `json:"discoveryStore,omitempty"` + // DiscoveryService configures discovery using a hosted discovery service. + DiscoveryService *DiscoveryServiceOptions `json:"discoveryService,omitempty"` // EnableAWSOIDCProvider will provision an AWS OIDC provider that trusts the ServiceAccount Issuer EnableAWSOIDCProvider bool `json:"enableAWSOIDCProvider,omitempty"` // AdditionalAudiences adds user defined audiences to the provisioned AWS OIDC provider AdditionalAudiences []string `json:"additionalAudiences,omitempty"` } +// DiscoveryServiceOptions configures a hosted discovery service. +// We leave open the possibility of configurable certificates etc in future. +type DiscoveryServiceOptions struct { + // URL is the base URL of the discovery service, including universe ID if applicable. + URL string `json:"url,omitempty"` +} + // ServiceAccountExternalPermissions grants a ServiceAccount permissions to external resources. type ServiceAccountExternalPermission struct { // Name is the name of the Kubernetes ServiceAccount. diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go index 0278d92b233c6..135a2cb60a17d 100644 --- a/pkg/apis/kops/v1alpha2/cluster.go +++ b/pkg/apis/kops/v1alpha2/cluster.go @@ -277,12 +277,20 @@ type KarpenterConfig struct { type ServiceAccountIssuerDiscoveryConfig struct { // DiscoveryStore is the VFS path to where OIDC Issuer Discovery metadata is stored. DiscoveryStore string `json:"discoveryStore,omitempty"` + // DiscoveryService configures discovery using a hosted discovery service. 
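+	// Only one of DiscoveryStore or DiscoveryService may be specified.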
+ DiscoveryService *DiscoveryServiceOptions `json:"discoveryService,omitempty"` // EnableAWSOIDCProvider will provision an AWS OIDC provider that trusts the ServiceAccount Issuer EnableAWSOIDCProvider bool `json:"enableAWSOIDCProvider,omitempty"` // AdditionalAudiences adds user defined audiences to the provisioned AWS OIDC provider AdditionalAudiences []string `json:"additionalAudiences,omitempty"` } +// DiscoveryServiceOptions configures a hosted discovery service. +type DiscoveryServiceOptions struct { + // URL is the base URL of the discovery service, including universe ID if applicable. + URL string `json:"url,omitempty"` +} + // ServiceAccountExternalPermissions grants a ServiceAccount permissions to external resources. type ServiceAccountExternalPermission struct { // Name is the name of the Kubernetes ServiceAccount. diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index abff18789564b..e2b4d9ac72337 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -354,6 +354,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*DiscoveryServiceOptions)(nil), (*kops.DiscoveryServiceOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(a.(*DiscoveryServiceOptions), b.(*kops.DiscoveryServiceOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kops.DiscoveryServiceOptions)(nil), (*DiscoveryServiceOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_DiscoveryServiceOptions_To_v1alpha2_DiscoveryServiceOptions(a.(*kops.DiscoveryServiceOptions), b.(*DiscoveryServiceOptions), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*DockerConfig)(nil), (*kops.DockerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_DockerConfig_To_kops_DockerConfig(a.(*DockerConfig), b.(*kops.DockerConfig), scope) }); err != nil { @@ -3496,6 +3506,26 @@ func Convert_kops_DNSControllerGossipConfigSecondary_To_v1alpha2_DNSControllerGo return autoConvert_kops_DNSControllerGossipConfigSecondary_To_v1alpha2_DNSControllerGossipConfigSecondary(in, out, s) } +func autoConvert_v1alpha2_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(in *DiscoveryServiceOptions, out *kops.DiscoveryServiceOptions, s conversion.Scope) error { + out.URL = in.URL + return nil +} + +// Convert_v1alpha2_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions is an autogenerated conversion function. +func Convert_v1alpha2_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(in *DiscoveryServiceOptions, out *kops.DiscoveryServiceOptions, s conversion.Scope) error { + return autoConvert_v1alpha2_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(in, out, s) +} + +func autoConvert_kops_DiscoveryServiceOptions_To_v1alpha2_DiscoveryServiceOptions(in *kops.DiscoveryServiceOptions, out *DiscoveryServiceOptions, s conversion.Scope) error { + out.URL = in.URL + return nil +} + +// Convert_kops_DiscoveryServiceOptions_To_v1alpha2_DiscoveryServiceOptions is an autogenerated conversion function. 
+func Convert_kops_DiscoveryServiceOptions_To_v1alpha2_DiscoveryServiceOptions(in *kops.DiscoveryServiceOptions, out *DiscoveryServiceOptions, s conversion.Scope) error { + return autoConvert_kops_DiscoveryServiceOptions_To_v1alpha2_DiscoveryServiceOptions(in, out, s) +} + func autoConvert_v1alpha2_DockerConfig_To_kops_DockerConfig(in *DockerConfig, out *kops.DockerConfig, s conversion.Scope) error { out.AuthorizationPlugins = in.AuthorizationPlugins out.Bridge = in.Bridge @@ -7586,6 +7616,15 @@ func Convert_kops_ServiceAccountExternalPermission_To_v1alpha2_ServiceAccountExt func autoConvert_v1alpha2_ServiceAccountIssuerDiscoveryConfig_To_kops_ServiceAccountIssuerDiscoveryConfig(in *ServiceAccountIssuerDiscoveryConfig, out *kops.ServiceAccountIssuerDiscoveryConfig, s conversion.Scope) error { out.DiscoveryStore = in.DiscoveryStore + if in.DiscoveryService != nil { + in, out := &in.DiscoveryService, &out.DiscoveryService + *out = new(kops.DiscoveryServiceOptions) + if err := Convert_v1alpha2_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(*in, *out, s); err != nil { + return err + } + } else { + out.DiscoveryService = nil + } out.EnableAWSOIDCProvider = in.EnableAWSOIDCProvider out.AdditionalAudiences = in.AdditionalAudiences return nil @@ -7598,6 +7637,15 @@ func Convert_v1alpha2_ServiceAccountIssuerDiscoveryConfig_To_kops_ServiceAccount func autoConvert_kops_ServiceAccountIssuerDiscoveryConfig_To_v1alpha2_ServiceAccountIssuerDiscoveryConfig(in *kops.ServiceAccountIssuerDiscoveryConfig, out *ServiceAccountIssuerDiscoveryConfig, s conversion.Scope) error { out.DiscoveryStore = in.DiscoveryStore + if in.DiscoveryService != nil { + in, out := &in.DiscoveryService, &out.DiscoveryService + *out = new(DiscoveryServiceOptions) + if err := Convert_kops_DiscoveryServiceOptions_To_v1alpha2_DiscoveryServiceOptions(*in, *out, s); err != nil { + return err + } + } else { + out.DiscoveryService = nil + } out.EnableAWSOIDCProvider = in.EnableAWSOIDCProvider out.AdditionalAudiences = in.AdditionalAudiences return nil diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index fc46b433a74b0..2dd05dd80f88e 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -1705,6 +1705,22 @@ func (in *DNSSpec) DeepCopy() *DNSSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscoveryServiceOptions) DeepCopyInto(out *DiscoveryServiceOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscoveryServiceOptions. +func (in *DiscoveryServiceOptions) DeepCopy() *DiscoveryServiceOptions { + if in == nil { + return nil + } + out := new(DiscoveryServiceOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { *out = *in @@ -5868,6 +5884,11 @@ func (in *ServiceAccountExternalPermission) DeepCopy() *ServiceAccountExternalPe // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceAccountIssuerDiscoveryConfig) DeepCopyInto(out *ServiceAccountIssuerDiscoveryConfig) { *out = *in + if in.DiscoveryService != nil { + in, out := &in.DiscoveryService, &out.DiscoveryService + *out = new(DiscoveryServiceOptions) + **out = **in + } if in.AdditionalAudiences != nil { in, out := &in.AdditionalAudiences, &out.AdditionalAudiences *out = make([]string, len(*in)) diff --git a/pkg/apis/kops/v1alpha3/cluster.go b/pkg/apis/kops/v1alpha3/cluster.go index a9cac629937f3..a385525b033f1 100644 --- a/pkg/apis/kops/v1alpha3/cluster.go +++ b/pkg/apis/kops/v1alpha3/cluster.go @@ -271,12 +271,20 @@ type KarpenterConfig struct { type ServiceAccountIssuerDiscoveryConfig struct { // DiscoveryStore is the VFS path to where OIDC Issuer Discovery metadata is stored. DiscoveryStore string `json:"discoveryStore,omitempty"` + // DiscoveryService configures discovery using a hosted discovery service. + DiscoveryService *DiscoveryServiceOptions `json:"discoveryService,omitempty"` // EnableAWSOIDCProvider will provision an AWS OIDC provider that trusts the ServiceAccount Issuer EnableAWSOIDCProvider bool `json:"enableAWSOIDCProvider,omitempty"` // AdditionalAudiences adds user defined audiences to the provisioned AWS OIDC provider AdditionalAudiences []string `json:"additionalAudiences,omitempty"` } +// DiscoveryServiceOptions configures a hosted discovery service. +type DiscoveryServiceOptions struct { + // URL is the base URL of the discovery service, including universe ID if applicable. + URL string `json:"url,omitempty"` +} + // ServiceAccountExternalPermissions grants a ServiceAccount permissions to external resources. type ServiceAccountExternalPermission struct { // Name is the name of the Kubernetes ServiceAccount. diff --git a/pkg/apis/kops/v1alpha3/zz_generated.conversion.go b/pkg/apis/kops/v1alpha3/zz_generated.conversion.go index 38d3756a4916b..fe1b818942fb1 100644 --- a/pkg/apis/kops/v1alpha3/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha3/zz_generated.conversion.go @@ -414,6 +414,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*DiscoveryServiceOptions)(nil), (*kops.DiscoveryServiceOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(a.(*DiscoveryServiceOptions), b.(*kops.DiscoveryServiceOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kops.DiscoveryServiceOptions)(nil), (*DiscoveryServiceOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_DiscoveryServiceOptions_To_v1alpha3_DiscoveryServiceOptions(a.(*kops.DiscoveryServiceOptions), b.(*DiscoveryServiceOptions), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*DockerConfig)(nil), (*kops.DockerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_DockerConfig_To_kops_DockerConfig(a.(*DockerConfig), b.(*kops.DockerConfig), scope) }); err != nil { @@ -3759,6 +3769,26 @@ func Convert_kops_DOSpec_To_v1alpha3_DOSpec(in *kops.DOSpec, out *DOSpec, s conv return autoConvert_kops_DOSpec_To_v1alpha3_DOSpec(in, out, s) } +func autoConvert_v1alpha3_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(in *DiscoveryServiceOptions, out *kops.DiscoveryServiceOptions, s conversion.Scope) error { + out.URL = in.URL + return nil +} + +// 
Convert_v1alpha3_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions is an autogenerated conversion function. +func Convert_v1alpha3_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(in *DiscoveryServiceOptions, out *kops.DiscoveryServiceOptions, s conversion.Scope) error { + return autoConvert_v1alpha3_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(in, out, s) +} + +func autoConvert_kops_DiscoveryServiceOptions_To_v1alpha3_DiscoveryServiceOptions(in *kops.DiscoveryServiceOptions, out *DiscoveryServiceOptions, s conversion.Scope) error { + out.URL = in.URL + return nil +} + +// Convert_kops_DiscoveryServiceOptions_To_v1alpha3_DiscoveryServiceOptions is an autogenerated conversion function. +func Convert_kops_DiscoveryServiceOptions_To_v1alpha3_DiscoveryServiceOptions(in *kops.DiscoveryServiceOptions, out *DiscoveryServiceOptions, s conversion.Scope) error { + return autoConvert_kops_DiscoveryServiceOptions_To_v1alpha3_DiscoveryServiceOptions(in, out, s) +} + func autoConvert_v1alpha3_DockerConfig_To_kops_DockerConfig(in *DockerConfig, out *kops.DockerConfig, s conversion.Scope) error { out.AuthorizationPlugins = in.AuthorizationPlugins out.Bridge = in.Bridge @@ -7871,6 +7901,15 @@ func Convert_kops_ServiceAccountExternalPermission_To_v1alpha3_ServiceAccountExt func autoConvert_v1alpha3_ServiceAccountIssuerDiscoveryConfig_To_kops_ServiceAccountIssuerDiscoveryConfig(in *ServiceAccountIssuerDiscoveryConfig, out *kops.ServiceAccountIssuerDiscoveryConfig, s conversion.Scope) error { out.DiscoveryStore = in.DiscoveryStore + if in.DiscoveryService != nil { + in, out := &in.DiscoveryService, &out.DiscoveryService + *out = new(kops.DiscoveryServiceOptions) + if err := Convert_v1alpha3_DiscoveryServiceOptions_To_kops_DiscoveryServiceOptions(*in, *out, s); err != nil { + return err + } + } else { + out.DiscoveryService = nil + } out.EnableAWSOIDCProvider = in.EnableAWSOIDCProvider out.AdditionalAudiences = in.AdditionalAudiences return nil @@ -7883,6 +7922,15 @@ func Convert_v1alpha3_ServiceAccountIssuerDiscoveryConfig_To_kops_ServiceAccount func autoConvert_kops_ServiceAccountIssuerDiscoveryConfig_To_v1alpha3_ServiceAccountIssuerDiscoveryConfig(in *kops.ServiceAccountIssuerDiscoveryConfig, out *ServiceAccountIssuerDiscoveryConfig, s conversion.Scope) error { out.DiscoveryStore = in.DiscoveryStore + if in.DiscoveryService != nil { + in, out := &in.DiscoveryService, &out.DiscoveryService + *out = new(DiscoveryServiceOptions) + if err := Convert_kops_DiscoveryServiceOptions_To_v1alpha3_DiscoveryServiceOptions(*in, *out, s); err != nil { + return err + } + } else { + out.DiscoveryService = nil + } out.EnableAWSOIDCProvider = in.EnableAWSOIDCProvider out.AdditionalAudiences = in.AdditionalAudiences return nil diff --git a/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go index 065d38ef89275..d0341614fe8da 100644 --- a/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go @@ -1611,6 +1611,22 @@ func (in *DOSpec) DeepCopy() *DOSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscoveryServiceOptions) DeepCopyInto(out *DiscoveryServiceOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscoveryServiceOptions. 
+func (in *DiscoveryServiceOptions) DeepCopy() *DiscoveryServiceOptions { + if in == nil { + return nil + } + out := new(DiscoveryServiceOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { *out = *in @@ -5820,6 +5836,11 @@ func (in *ServiceAccountExternalPermission) DeepCopy() *ServiceAccountExternalPe // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceAccountIssuerDiscoveryConfig) DeepCopyInto(out *ServiceAccountIssuerDiscoveryConfig) { *out = *in + if in.DiscoveryService != nil { + in, out := &in.DiscoveryService, &out.DiscoveryService + *out = new(DiscoveryServiceOptions) + **out = **in + } if in.AdditionalAudiences != nil { in, out := &in.AdditionalAudiences, &out.AdditionalAudiences *out = make([]string, len(*in)) diff --git a/pkg/apis/kops/validation/legacy.go b/pkg/apis/kops/validation/legacy.go index e5427cb5d8c76..b49d3e7ba00a3 100644 --- a/pkg/apis/kops/validation/legacy.go +++ b/pkg/apis/kops/validation/legacy.go @@ -214,18 +214,25 @@ func validateServiceAccountIssuerDiscovery(c *kops.Cluster, said *kops.ServiceAc return nil } allErrs := field.ErrorList{} - saidStore := said.DiscoveryStore - if saidStore != "" { + discoveryStore := said.DiscoveryStore + discoveryService := said.DiscoveryService + + if discoveryStore != "" && discoveryService != nil { + bothField := fieldSpec.Child("discoveryStore") + allErrs = append(allErrs, field.Forbidden(bothField, "Only one of discoveryStore or discoveryService may be specified")) + } + + if discoveryStore != "" { saidStoreField := fieldSpec.Child("serviceAccountIssuerDiscovery", "discoveryStore") - base, err := vfsContext.BuildVfsPath(saidStore) + base, err := vfsContext.BuildVfsPath(discoveryStore) if err != nil { - allErrs = append(allErrs, field.Invalid(saidStoreField, saidStore, "not a valid VFS path")) + allErrs = append(allErrs, field.Invalid(saidStoreField, discoveryStore, "not a valid VFS path")) } else { switch base := base.(type) { case *vfs.S3Path: // S3 bucket should not contain dots because of the wildcard certificate if strings.Contains(base.Bucket(), ".") { - allErrs = append(allErrs, field.Invalid(saidStoreField, saidStore, "Bucket name cannot contain dots")) + allErrs = append(allErrs, field.Invalid(saidStoreField, discoveryStore, "Bucket name cannot contain dots")) } case *vfs.GSPath: // No known restrictions currently. Added here to avoid falling into the default catch all below. 
@@ -233,17 +240,25 @@ func validateServiceAccountIssuerDiscovery(c *kops.Cluster, said *kops.ServiceAc // memfs is ok for tests; not OK otherwise if !base.IsClusterReadable() { // (If this _is_ a test, we should call MarkClusterReadable) - allErrs = append(allErrs, field.Invalid(saidStoreField, saidStore, "S3 is the only supported VFS for discoveryStore")) + allErrs = append(allErrs, field.Invalid(saidStoreField, discoveryStore, "S3 is the only supported VFS for discoveryStore")) } default: - allErrs = append(allErrs, field.Invalid(saidStoreField, saidStore, "S3 is the only supported VFS for discoveryStore")) + allErrs = append(allErrs, field.Invalid(saidStoreField, discoveryStore, "S3 is the only supported VFS for discoveryStore")) } } } + + if discoveryService != nil { + saidServiceField := fieldSpec.Child("serviceAccountIssuerDiscovery", "discoveryService", "url") + if discoveryService.URL == "" { + allErrs = append(allErrs, field.Required(saidServiceField, "discoveryService URL must be specified")) + } + } + if said.EnableAWSOIDCProvider { enableOIDCField := fieldSpec.Child("serviceAccountIssuerDiscovery", "enableAWSOIDCProvider") - if saidStore == "" { - allErrs = append(allErrs, field.Forbidden(enableOIDCField, "AWS OIDC Provider requires a discovery store")) + if discoveryStore == "" && discoveryService == nil { + allErrs = append(allErrs, field.Forbidden(enableOIDCField, "AWS OIDC Provider requires a discoveryStore or discoveryService to be set")) } } diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index 25e17c6e2c814..ef93e8c1c4fe1 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -1731,6 +1731,22 @@ func (in *DOSpec) DeepCopy() *DOSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscoveryServiceOptions) DeepCopyInto(out *DiscoveryServiceOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscoveryServiceOptions. +func (in *DiscoveryServiceOptions) DeepCopy() *DiscoveryServiceOptions { + if in == nil { + return nil + } + out := new(DiscoveryServiceOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { *out = *in @@ -6127,6 +6143,11 @@ func (in *ServiceAccountExternalPermission) DeepCopy() *ServiceAccountExternalPe // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceAccountIssuerDiscoveryConfig) DeepCopyInto(out *ServiceAccountIssuerDiscoveryConfig) { *out = *in + if in.DiscoveryService != nil { + in, out := &in.DiscoveryService, &out.DiscoveryService + *out = new(DiscoveryServiceOptions) + **out = **in + } if in.AdditionalAudiences != nil { in, out := &in.AdditionalAudiences, &out.AdditionalAudiences *out = make([]string, len(*in)) diff --git a/pkg/apis/nodeup/config.go b/pkg/apis/nodeup/config.go index 7b108bb902b42..cf01203240d90 100644 --- a/pkg/apis/nodeup/config.go +++ b/pkg/apis/nodeup/config.go @@ -20,6 +20,7 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/aws" + "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/apis/kops/model" "k8s.io/kops/util/pkg/architectures" @@ -142,6 +143,15 @@ type Config struct { // Discovery methods UsesLegacyGossip bool `json:"usesLegacyGossip"` UsesNoneDNS bool `json:"usesNoneDNS"` + + // DiscoveryService implements discovery using a hosted discovery service. + DiscoveryService *DiscoveryServiceOptions `json:"discoveryServiceWithUniverse,omitempty"` +} + +// DiscoveryServiceOptions is the configuration for a discovery service. +type DiscoveryServiceOptions struct { + // URL is the base URL of the discovery service, including universe ID if applicable. + URL string `json:"url,omitempty"` } // BootConfig is the configuration for the nodeup binary that might be too big to fit in userdata. @@ -240,6 +250,13 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Confi UsesNoneDNS: cluster.UsesNoneDNS(), } + if cluster.Spec.ServiceAccountIssuerDiscovery != nil && cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService != nil { + discoveryService := cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService + config.DiscoveryService = &DiscoveryServiceOptions{ + URL: discoveryService.URL, + } + } + bootConfig := BootConfig{ CloudProvider: cluster.GetCloudProvider(), ClusterName: cluster.ObjectMeta.Name, diff --git a/pkg/client/simple/vfsclientset/clientset.go b/pkg/client/simple/vfsclientset/clientset.go index a6fbc9e1b44b8..be51d541ab3c1 100644 --- a/pkg/client/simple/vfsclientset/clientset.go +++ b/pkg/client/simple/vfsclientset/clientset.go @@ -212,6 +212,11 @@ func (c *VFSClientset) DeleteCluster(ctx context.Context, cluster *kops.Cluster) return err } } + + if discoveryService := cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService; discoveryService != nil { + // TODO: implement deletion of discovery service resources + klog.Warningf("automatic cleanup of discovery service resources not yet implemented") + } } secretStore := cluster.Spec.ConfigStore.Secrets diff --git a/pkg/featureflag/featureflag.go b/pkg/featureflag/featureflag.go index 67888182ef7b5..64005e80e1b5c 100644 --- a/pkg/featureflag/featureflag.go +++ b/pkg/featureflag/featureflag.go @@ -100,6 +100,8 @@ var ( AWSSingleNodesInstanceGroup = new("AWSSingleNodesInstanceGroup", Bool(false)) // ClusterAPI enables support for Cluster API (CAPI) resources. ClusterAPI = new("ClusterAPI", Bool(false)) + // DiscoveryService enables support for OIDC discovery via a hosted service. 
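+	// When enabled, `kops create cluster` accepts the --discovery-service flag.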
+ DiscoveryService = new("DiscoveryService", Bool(false)) ) // FeatureFlag defines a feature flag diff --git a/pkg/model/bootstrapscript.go b/pkg/model/bootstrapscript.go index 21bacfeb4e18b..5bcfdbf8b227e 100644 --- a/pkg/model/bootstrapscript.go +++ b/pkg/model/bootstrapscript.go @@ -179,6 +179,15 @@ func KeypairNamesForInstanceGroup(cluster *kops.Cluster, ig *kops.InstanceGroup) } } + // Add keypair for discovery service CA if enabled + if ig.IsControlPlane() { + if cluster.Spec.ServiceAccountIssuerDiscovery != nil && + cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService != nil && + cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService.URL != "" { + keypairs = append(keypairs, fi.DiscoveryCAID) + } + } + if ig.HasAPIServer() { keypairs = append(keypairs, "apiserver-aggregator-ca", "service-account", "etcd-clients-ca") } @@ -213,13 +222,13 @@ func (b *BootstrapScriptBuilder) ResourceNodeUp(c *fi.CloudupModelBuilderContext } } - caTasks := map[string]*fitasks.Keypair{} + keypairTasks := map[string]*fitasks.Keypair{} for _, keypair := range keypairNames { caTaskObject, found := c.Tasks["Keypair/"+keypair] if !found { return nil, fmt.Errorf("keypair/%s task not found", keypair) } - caTasks[keypair] = caTaskObject.(*fitasks.Keypair) + keypairTasks[keypair] = caTaskObject.(*fitasks.Keypair) } task := &BootstrapScript{ @@ -228,7 +237,7 @@ func (b *BootstrapScriptBuilder) ResourceNodeUp(c *fi.CloudupModelBuilderContext cluster: b.Cluster, ig: ig, builder: b, - caTasks: caTasks, + caTasks: keypairTasks, } task.resource.Task = task task.nodeupConfig.Task = task diff --git a/pkg/model/components/discovery.go b/pkg/model/components/discovery.go index 5494813b530d5..55d3c517423ae 100644 --- a/pkg/model/components/discovery.go +++ b/pkg/model/components/discovery.go @@ -75,6 +75,13 @@ func (b *DiscoveryOptionsBuilder) BuildOptions(o *kops.Cluster) error { default: return fmt.Errorf("locationStore=%q is of unexpected type %T", store, base) } + } else if said != nil && said.DiscoveryService != nil { + discoveryService := said.DiscoveryService + + serviceAccountIssuer = discoveryService.URL + if serviceAccountIssuer == "" { + return fmt.Errorf("discoveryService URL must be specified") + } } else { if supportsPublicJWKS(clusterSpec) && clusterSpec.API.PublicName != "" { serviceAccountIssuer = "https://" + clusterSpec.API.PublicName diff --git a/pkg/model/issuerdiscovery.go b/pkg/model/issuerdiscovery.go index a6c3231be1074..a7c71deaeb597 100644 --- a/pkg/model/issuerdiscovery.go +++ b/pkg/model/issuerdiscovery.go @@ -56,8 +56,8 @@ type oidcDiscovery struct { func (b *IssuerDiscoveryModelBuilder) Build(c *fi.CloudupModelBuilderContext) error { ctx := context.TODO() - said := b.Cluster.Spec.ServiceAccountIssuerDiscovery - if said == nil || said.DiscoveryStore == "" { + serviceAccountIssuerDiscovery := b.Cluster.Spec.ServiceAccountIssuerDiscovery + if serviceAccountIssuerDiscovery == nil || serviceAccountIssuerDiscovery.DiscoveryStore == "" { return nil } diff --git a/pkg/model/pki.go b/pkg/model/pki.go index d52960ed693d7..538e0717f4e44 100644 --- a/pkg/model/pki.go +++ b/pkg/model/pki.go @@ -42,6 +42,19 @@ func (b *PKIModelBuilder) Build(c *fi.CloudupModelBuilderContext) error { } c.AddTask(defaultCA) + if b.Cluster.Spec.ServiceAccountIssuerDiscovery != nil && + b.Cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService != nil && + b.Cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService.URL != "" { + // TODO: Only create the discovery CA via this task (but it's tricky because we 
need the ID so early) + discoveryCA := &fitasks.Keypair{ + Name: fi.PtrTo(string(fi.DiscoveryCAID)), + Lifecycle: b.Lifecycle, + Subject: "cn=" + fi.DiscoveryCAID, + Type: "ca", + } + c.AddTask(discoveryCA) + } + { aggregatorCA := &fitasks.Keypair{ Name: fi.PtrTo("apiserver-aggregator-ca"), diff --git a/pkg/nodemodel/nodeupconfigbuilder.go b/pkg/nodemodel/nodeupconfigbuilder.go index 1f04ef7ee85d8..35446f32410f0 100644 --- a/pkg/nodemodel/nodeupconfigbuilder.go +++ b/pkg/nodemodel/nodeupconfigbuilder.go @@ -276,6 +276,13 @@ func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, wellKnownAddre } } config.KeypairIDs["service-account"] = keysets["service-account"].Primary.Id + + // Add key for registering with the discovery service (if configured) + if cluster.Spec.ServiceAccountIssuerDiscovery != nil && + cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService != nil && + cluster.Spec.ServiceAccountIssuerDiscovery.DiscoveryService.URL != "" { + config.KeypairIDs[fi.DiscoveryCAID] = keysets[fi.DiscoveryCAID].Primary.Id + } } else { if keysets["etcd-client-cilium"] != nil { config.KeypairIDs["etcd-client-cilium"] = keysets["etcd-client-cilium"].Primary.Id diff --git a/tests/e2e/scenarios/discovery-service/run-test b/tests/e2e/scenarios/discovery-service/run-test new file mode 100755 index 0000000000000..0c7d47c2e7de2 --- /dev/null +++ b/tests/e2e/scenarios/discovery-service/run-test @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Copyright 2025 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -euo pipefail + +REPO_ROOT=$(git rev-parse --show-toplevel) +cd ${REPO_ROOT} + +CLUSTER_NAME="discovery-test1.k8s.local" + + +CLOUD_PROVIDER=gce +ZONES=us-east4-a + +OVERRIDES="${OVERRIDES-} --node-count=2" # We need at least 2 nodes for CoreDNS to validate +OVERRIDES="${OVERRIDES} --gce-service-account=default" # Use default service account because boskos permissions are limited +OVERRIDES="${OVERRIDES} --discovery-service=https://discovery.kubedisco.com" + + +# Enable feature flag for Discovery Service support +export KOPS_FEATURE_FLAGS=DiscoveryService + +# Build kOps binary +WORKDIR=${REPO_ROOT}/.build/ + +BINDIR=${WORKDIR}/bin +mkdir -p "${BINDIR}" +go build -o ${BINDIR}/kops ./cmd/kops +export KOPS=${BINDIR}/kops + +. 
hack/dev-build-gce.sh + +# Delete cluster when done +function cleanup() { + if [[ -z "${SKIP_CLEANUP:-}" ]]; then + echo "running cleanup" + ${KOPS} delete cluster ${CLUSTER_NAME} --yes || true + fi +} +trap cleanup EXIT + +# Create kOps cluster +${KOPS} create cluster ${CLUSTER_NAME} --cloud=${CLOUD_PROVIDER} --zones=${ZONES} ${OVERRIDES:-} +${KOPS} update cluster ${CLUSTER_NAME} --yes --admin +${KOPS} validate cluster ${CLUSTER_NAME} --wait=10m + +# Verify that the Discovery Service is working as expected +DISCOVERY_SERVICE_URL=$(${KOPS} get cluster discovery-test1.k8s.local -ojson | jq -r .spec.serviceAccountIssuerDiscovery.discoveryService.url) +echo "Discovery Service URL: ${DISCOVERY_SERVICE_URL}" + +echo "Fetching OpenID configuration from Discovery Service:" +curl ${DISCOVERY_SERVICE_URL}.well-known/openid-configuration | jq . + +JWKS_URI=$(curl -s ${DISCOVERY_SERVICE_URL}.well-known/openid-configuration | jq -r .jwks_uri) +echo "JWKS_URI: ${JWKS_URI}" + +echo "Fetching JWKS from ${JWKS_URI}:" +curl ${JWKS_URI} | jq . diff --git a/upup/pkg/fi/ca.go b/upup/pkg/fi/ca.go index b8f0402762777..5c504b70ac2be 100644 --- a/upup/pkg/fi/ca.go +++ b/upup/pkg/fi/ca.go @@ -32,8 +32,12 @@ import ( "k8s.io/kops/util/pkg/vfs" ) +// CertificateIDCA is the ID for the primary cluster CA certificate/key const CertificateIDCA = "kubernetes-ca" +// DiscoveryCAID is the ID for the discovery service client certificate CA +const DiscoveryCAID = "discovery-ca" + const ( // SecretNameSSHPrimary is the Name for the primary SSH key SecretNameSSHPrimary = "admin" diff --git a/upup/pkg/fi/cloudup/new_cluster.go b/upup/pkg/fi/cloudup/new_cluster.go index 0a3dc28d6b1fa..506d87579cf4a 100644 --- a/upup/pkg/fi/cloudup/new_cluster.go +++ b/upup/pkg/fi/cloudup/new_cluster.go @@ -18,6 +18,7 @@ package cloudup import ( "context" + "crypto/x509/pkix" "fmt" "os" "strconv" @@ -37,12 +38,16 @@ import ( "k8s.io/kops/pkg/client/simple" "k8s.io/kops/pkg/clouds" "k8s.io/kops/pkg/featureflag" + "k8s.io/kops/pkg/pki" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/cloudup/awsup" "k8s.io/kops/upup/pkg/fi/cloudup/azure" "k8s.io/kops/upup/pkg/fi/cloudup/gce" "k8s.io/kops/upup/pkg/fi/cloudup/openstack" + "k8s.io/kops/upup/pkg/fi/fitasks" "k8s.io/kops/util/pkg/architectures" + + discoveryapi "k8s.io/kops/discovery/apis/discovery.kops.k8s.io/v1alpha1" ) const ( @@ -62,6 +67,11 @@ type NewClusterOptions struct { ConfigBase string // DiscoveryStore is the location where we will store public OIDC-compatible discovery documents, under a cluster-specific directory. It defaults to not publishing discovery documents. DiscoveryStore string + + // PublicDiscoveryServiceURL indicates that we should use a public discovery service URL for OIDC discovery. + // We create a discovery ID CA, and append the universe ID to the URL. + PublicDiscoveryServiceURL string + // KubernetesVersion is the version of Kubernetes to deploy. It defaults to the version recommended by the channel. KubernetesVersion string // KubernetesFeatureGates is the list of Kubernetes feature gates to enable/disable. @@ -197,6 +207,8 @@ type NewClusterResult struct { // It is the responsibility of the caller to call cloudup.PerformAssignments() on // the returned cluster spec. 
func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewClusterResult, error) { + ctx := context.TODO() + if opt.ClusterName == "" { return nil, fmt.Errorf("name is required") } @@ -415,6 +427,35 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster return nil, err } + if opt.PublicDiscoveryServiceURL != "" { + discoveryServiceURL := opt.PublicDiscoveryServiceURL + + keystore, err := clientset.KeyStore(cluster) + if err != nil { + return nil, err + } + + universeID, err := discoveryUniverseID(ctx, keystore) + if err != nil { + return nil, err + } + + if !strings.HasSuffix(discoveryServiceURL, "/") { + discoveryServiceURL += "/" + } + discoveryServiceURL += universeID + "/" + + cluster.Spec.ServiceAccountIssuerDiscovery = &api.ServiceAccountIssuerDiscoveryConfig{ + DiscoveryService: &api.DiscoveryServiceOptions{ + URL: discoveryServiceURL, + }, + } + if cluster.GetCloudProvider() == api.CloudProviderAWS { + cluster.Spec.ServiceAccountIssuerDiscovery.EnableAWSOIDCProvider = true + cluster.Spec.IAM.UseServiceAccountExternalPermissions = fi.PtrTo(true) + } + } + var nodes []*api.InstanceGroup switch opt.InstanceManager { @@ -1680,3 +1721,31 @@ func MachineArchitecture(cloud fi.Cloud, machineType string) (architectures.Arch return architectures.ArchitectureAmd64, nil } } + +// discoveryUniverseID returns the universe ID for the cluster's discovery service, +// creating a new discovery CA if necessary. +func discoveryUniverseID(ctx context.Context, keystore fi.Keystore) (string, error) { + keyset, err := keystore.FindKeyset(ctx, fi.DiscoveryCAID) + if err != nil { + return "", fmt.Errorf("error finding discovery CA: %w", err) + } + + if keyset == nil || keyset.Primary == nil || keyset.Primary.Certificate == nil { + subject := pkix.Name{ + CommonName: fi.DiscoveryCAID, + } + keyset, err = fitasks.CreateKeyset(ctx, keystore, fi.DiscoveryCAID, pki.IssueCertRequest{ + Subject: subject, + Type: "ca", + }) + if err != nil { + return "", fmt.Errorf("error creating discovery CA: %w", err) + } + } + + if keyset == nil || keyset.Primary == nil || keyset.Primary.Certificate == nil || keyset.Primary.Certificate.Certificate == nil { + return "", fmt.Errorf("discovery CA creation failed") + } + + return discoveryapi.ComputeUniverseIDFromCertificate(keyset.Primary.Certificate.Certificate), nil +} diff --git a/upup/pkg/fi/fitasks/keypair.go b/upup/pkg/fi/fitasks/keypair.go index fa526ae6d3c27..4863a64c767c5 100644 --- a/upup/pkg/fi/fitasks/keypair.go +++ b/upup/pkg/fi/fitasks/keypair.go @@ -17,6 +17,7 @@ limitations under the License. package fitasks import ( + "context" "crypto/x509/pkix" "fmt" "sort" @@ -197,25 +198,13 @@ func (_ *Keypair) Render(c *fi.CloudupContext, a, e, changes *Keypair) error { if createCertificate { klog.V(2).Infof("Creating PKI keypair %q", name) - keyset, err := c.T.Keystore.FindKeyset(ctx, name) + subjectPkix, err := parsePkixName(e.Subject) if err != nil { - return err - } - if keyset == nil { - keyset = &fi.Keyset{ - Items: map[string]*fi.KeysetItem{}, - } + return fmt.Errorf("error parsing Subject: %v", err) } - // We always reuse the private key if it exists, - // if we change keys we often have to regenerate e.g. the service accounts - // TODO: Eventually rotate keys / don't always reuse? 
- var privateKey *pki.PrivateKey - if keyset.Primary != nil { - privateKey = keyset.Primary.PrivateKey - } - if privateKey == nil { - klog.V(2).Infof("Creating privateKey %q", name) + if len(subjectPkix.ToRDNSequence()) == 0 { + return fmt.Errorf("subject name was empty for SSL keypair %q", *e.Name) } signer := fi.CertificateIDCA @@ -223,45 +212,16 @@ func (_ *Keypair) Render(c *fi.CloudupContext, a, e, changes *Keypair) error { signer = fi.ValueOf(e.Signer.Name) } - klog.Infof("Issuing new certificate: %q", *e.Name) - - serial := pki.BuildPKISerial(time.Now().UnixNano()) - - subjectPkix, err := parsePkixName(e.Subject) - if err != nil { - return fmt.Errorf("error parsing Subject: %v", err) - } - - if len(subjectPkix.ToRDNSequence()) == 0 { - return fmt.Errorf("subject name was empty for SSL keypair %q", *e.Name) - } - req := pki.IssueCertRequest{ Signer: signer, Type: e.Type, Subject: *subjectPkix, AlternateNames: e.AlternateNames, - PrivateKey: privateKey, - Serial: serial, - } - cert, privateKey, _, err := pki.IssueCert(ctx, &req, fi.NewPKIKeystoreAdapter(c.T.Keystore)) - if err != nil { - return err - } - - serialString := cert.Certificate.SerialNumber.String() - ki := &fi.KeysetItem{ - Id: serialString, - Certificate: cert, - PrivateKey: privateKey, } - keyset.LegacyFormat = false - keyset.Items[ki.Id] = ki - keyset.Primary = ki - err = c.T.Keystore.StoreKeyset(ctx, name, keyset) + keyset, err := CreateKeyset(ctx, c.T.Keystore, name, req) if err != nil { - return err + return fmt.Errorf("error creating certificate: %v", err) } if err := e.setResources(keyset); err != nil { @@ -275,7 +235,7 @@ func (_ *Keypair) Render(c *fi.CloudupContext, a, e, changes *Keypair) error { return fmt.Errorf("unable to find created certificate %q: %w", name, err) } - klog.V(8).Infof("created certificate with cn=%s", cert.Subject.CommonName) + klog.V(8).Infof("created certificate with subject %v", subjectPkix) } // TODO: Check correct subject / flags @@ -301,6 +261,62 @@ func (_ *Keypair) Render(c *fi.CloudupContext, a, e, changes *Keypair) error { } return nil + +} + +func CreateKeyset(ctx context.Context, keystore fi.Keystore, name string, req pki.IssueCertRequest) (*fi.Keyset, error) { + keyset, err := keystore.FindKeyset(ctx, name) + if err != nil { + return nil, err + } + if keyset == nil { + keyset = &fi.Keyset{ + Items: map[string]*fi.KeysetItem{}, + } + } + + if req.Serial == nil { + serial := pki.BuildPKISerial(time.Now().UnixNano()) + req.Serial = serial + } + + // We always reuse the private key if it exists, + // if we change keys we often have to regenerate e.g. the service accounts + // TODO: Eventually rotate keys / don't always reuse? 
+ var privateKey *pki.PrivateKey + if keyset.Primary != nil { + privateKey = keyset.Primary.PrivateKey + } + if privateKey == nil { + klog.V(2).Infof("Creating privateKey %q", name) + } + + req.PrivateKey = privateKey + + klog.Infof("Issuing new certificate: %q", name) + + cert, privateKey, _, err := pki.IssueCert(ctx, &req, fi.NewPKIKeystoreAdapter(keystore)) + if err != nil { + return nil, err + } + + serialString := cert.Certificate.SerialNumber.String() + ki := &fi.KeysetItem{ + Id: serialString, + Certificate: cert, + PrivateKey: privateKey, + } + + keyset.LegacyFormat = false + keyset.Items[ki.Id] = ki + keyset.Primary = ki + + err = keystore.StoreKeyset(ctx, name, keyset) + if err != nil { + return nil, err + } + + return keyset, nil } func parsePkixName(s string) (*pkix.Name, error) { diff --git a/upup/pkg/fi/nodeup/command.go b/upup/pkg/fi/nodeup/command.go index 6f5d4f33d8e84..6486e08ba2b18 100644 --- a/upup/pkg/fi/nodeup/command.go +++ b/upup/pkg/fi/nodeup/command.go @@ -295,6 +295,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error { } loader := &Loader{} + loader.Builders = append(loader.Builders, &model.DiscoveryService{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.EtcHostsBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.NTPBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.DirectoryBuilder{NodeupModelContext: modelContext}) diff --git a/upup/pkg/fi/nodeup/nodetasks/discovery_service_register.go b/upup/pkg/fi/nodeup/nodetasks/discovery_service_register.go new file mode 100644 index 0000000000000..756b134de6e75 --- /dev/null +++ b/upup/pkg/fi/nodeup/nodetasks/discovery_service_register.go @@ -0,0 +1,174 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodetasks + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + + discoveryapi "k8s.io/kops/discovery/apis/discovery.kops.k8s.io/v1alpha1" + "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/nodeup/local" +) + +// DiscoveryServiceRegisterTask is responsible for registering with the discovery service. 
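+// It presents a client certificate issued from the discovery CA and uses
+// server-side apply to publish a DiscoveryEndpoint carrying the cluster's
+// service-account JWKS.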
+type DiscoveryServiceRegisterTask struct {
+	// Name is a reference for our task
+	Name string
+
+	// DiscoveryService is the discovery service to register with (including the universe ID prefix)
+	DiscoveryService string
+
+	// RegisterNamespace is the namespace to use for registration with the discovery service
+	RegisterNamespace string
+
+	// RegisterName is the name to use for registration with the discovery service
+	RegisterName string
+
+	// ClientCert is the client certificate to present when registering
+	ClientCert fi.Resource
+
+	// ClientKey is the client key to use when registering
+	ClientKey fi.Resource
+
+	// ClientCA is the CA certificate to use when registering,
+	// we include it in the bundle presented to the server,
+	// as it is likely self-signed.
+	ClientCA fi.Resource
+
+	// JWKS is the set of public keys to advertise through the discovery service.
+	JWKS []JSONWebKey
+}
+
+// JSONWebKey wraps discoveryapi.JSONWebKey, to implement dependency discovery.
+type JSONWebKey struct {
+	discoveryapi.JSONWebKey
+}
+
+var _ fi.NodeupHasDependencies = &JSONWebKey{}
+
+// GetDependencies returns the dependencies for the JSONWebKey; there are none.
+func (j *JSONWebKey) GetDependencies(tasks map[string]fi.NodeupTask) []fi.NodeupTask {
+	return nil
+}
+
+var _ fi.NodeupTask = &DiscoveryServiceRegisterTask{}
+
+func (e *DiscoveryServiceRegisterTask) String() string {
+	return fmt.Sprintf("DiscoveryServiceRegisterTask: %s", e.Name)
+}
+
+var _ fi.HasName = &DiscoveryServiceRegisterTask{}
+
+func (f *DiscoveryServiceRegisterTask) GetName() *string {
+	return &f.Name
+}
+
+func (e *DiscoveryServiceRegisterTask) Find(c *fi.NodeupContext) (*DiscoveryServiceRegisterTask, error) {
+	// We always register with the service.
+	return nil, nil
+}
+
+func (e *DiscoveryServiceRegisterTask) Run(c *fi.NodeupContext) error {
+	return fi.NodeupDefaultDeltaRunMethod(e, c)
+}
+
+func (_ *DiscoveryServiceRegisterTask) CheckChanges(a, e, changes *DiscoveryServiceRegisterTask) error {
+	return nil
+}
+
+func (_ *DiscoveryServiceRegisterTask) RenderLocal(t *local.LocalTarget, a, e, changes *DiscoveryServiceRegisterTask) error {
+	ctx := context.TODO()
+
+	log := klog.FromContext(ctx)
+
+	clientCert, err := fi.ResourceAsBytes(e.ClientCert)
+	if err != nil {
+		return err
+	}
+	clientKey, err := fi.ResourceAsBytes(e.ClientKey)
+	if err != nil {
+		return err
+	}
+	clientCA, err := fi.ResourceAsBytes(e.ClientCA)
+	if err != nil {
+		return err
+	}
+
+	clientCertBundle := []byte{}
+	clientCertBundle = append(clientCertBundle, clientCert...)
+	clientCertBundle = append(clientCertBundle, clientCA...)
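+	// The bundle deliberately includes the discovery CA: the server keeps no
+	// pre-configured trust store for per-cluster CAs and derives the universe
+	// from the CA chain presented in the TLS handshake.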
+ + config := &rest.Config{ + Host: e.DiscoveryService, + TLSClientConfig: rest.TLSClientConfig{ + CertData: clientCertBundle, + KeyData: clientKey, + }, + } + kubeClient, err := dynamic.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating dynamic client: %w", err) + } + + spec := discoveryapi.DiscoveryEndpointSpec{} + + spec.OIDC = &discoveryapi.OIDCSpec{} + + for _, jwk := range e.JWKS { + spec.OIDC.Keys = append(spec.OIDC.Keys, jwk.JSONWebKey) + } + + ep := &discoveryapi.DiscoveryEndpoint{ + Spec: spec, + } + + ep.Kind = "DiscoveryEndpoint" + ep.APIVersion = "discovery.kops.k8s.io/v1alpha1" + + ep.Name = e.RegisterName + ep.Namespace = e.RegisterNamespace + + // Convert to Unstructured + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ep) + if err != nil { + return fmt.Errorf("failed to convert to unstructured: %w", err) + } + u := &unstructured.Unstructured{Object: obj} + gvr := discoveryapi.DiscoveryEndpointGVR + + // Use Server-Side Apply to Create/Update + created, err := kubeClient.Resource(gvr).Namespace(u.GetNamespace()).Apply(ctx, u.GetName(), u, metav1.ApplyOptions{FieldManager: "nodeup-register"}) + if err != nil { + return fmt.Errorf("failed to register with discovery service: %w", err) + } + + var result discoveryapi.DiscoveryEndpoint + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(created.Object, &result); err != nil { + return fmt.Errorf("failed to convert from unstructured: %w", err) + } + log.Info("registered with discovery service", "result", result) + + return nil +} diff --git a/upup/pkg/fi/nodeup/nodetasks/issue_cert.go b/upup/pkg/fi/nodeup/nodetasks/issue_cert.go index eff790cc2cd6c..82f4620da7243 100644 --- a/upup/pkg/fi/nodeup/nodetasks/issue_cert.go +++ b/upup/pkg/fi/nodeup/nodetasks/issue_cert.go @@ -219,7 +219,7 @@ func newStaticKeystore(ctx context.Context, signer string, keypairID string, key } if keypairID == "" { - return nil, fmt.Errorf("missing keypairID for %s", signer) + return nil, fmt.Errorf("missing keypairID for signer %s", signer) } keyset, err := keystore.FindKeyset(ctx, signer)