From 1eaac4a5407c74eff01d50af069bb494ca5e36e1 Mon Sep 17 00:00:00 2001
From: Alexandr Demicev
Date: Tue, 29 Apr 2025 17:04:06 +0200
Subject: [PATCH 1/2] Check for broken links in CI workflow

Signed-off-by: Alexandr Demicev
---
 Makefile | 8 +-
 tools/verifybrokenlinks/main.go | 209 ++++++++++++++++++++++++++++++++
 2 files changed, 216 insertions(+), 1 deletion(-)
 create mode 100644 tools/verifybrokenlinks/main.go

diff --git a/Makefile b/Makefile
index 5204b79b..d5ce71cc 100644
--- a/Makefile
+++ b/Makefile
@@ -74,7 +74,13 @@ watch: environment ## Watch for changes, rebuild, and preview with hot reload.
 	"nodemon --watch content --watch docs --ext adoc,yml --exec 'make dev'" \
 	"make preview"
 
+##@ Verification
+
+.PHONY: verify-broken-links
+verify-broken-links: ## Check .adoc files for broken GitHub links.
+	go run tools/verifybrokenlinks/main.go -docs-dir=docs -max-parallel=10
+
 ##@ CI
 
 .PHONY: ci
-ci: environment gh-pages dev ## Run the build for continuous integration.
\ No newline at end of file
+ci: environment gh-pages dev verify-broken-links ## Run the build and verification for continuous integration.
diff --git a/tools/verifybrokenlinks/main.go b/tools/verifybrokenlinks/main.go
new file mode 100644
index 00000000..8fbd7cdd
--- /dev/null
+++ b/tools/verifybrokenlinks/main.go
@@ -0,0 +1,209 @@
+// Command verifybrokenlinks scans .adoc files for GitHub links and reports
+// links that no longer resolve, retrying requests that hit rate limits.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/fs"
+	"net/http"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type URLOccurrence struct {
+	URL  string
+	File string
+}
+
+type URLCheckResult struct {
+	URL        string
+	IsValid    bool
+	StatusCode int
+	ErrorMsg   string
+}
+
+func main() {
+	docsDir := flag.String("docs-dir", "docs", "directory to scan for .adoc files")
+	maxParallel := flag.Int("max-parallel", 10, "maximum number of parallel link checks")
+	exitOnError := flag.Bool("exit-on-error", true, "exit with non-zero code if broken links are found")
+	flag.Parse()
+
+	allOccurrences, uniqueURLs := extractLinks(*docsDir)
+	results := checkLinks(uniqueURLs, *maxParallel, len(uniqueURLs))
+
+	brokenLinks := make(map[string]URLCheckResult)
+	for _, result := range results {
+		if !result.IsValid {
+			brokenLinks[result.URL] = result
+		}
+	}
+
+	// Report every broken link together with the files it was found in.
+	for url, result := range brokenLinks {
+		errorCode := result.ErrorMsg
+		if result.StatusCode != 0 {
+			errorCode = fmt.Sprintf("HTTP %d", result.StatusCode)
+		}
+		fmt.Printf("❌ %s %s\nFound in:\n", errorCode, url)
+		for _, occurrence := range allOccurrences {
+			if occurrence.URL == url {
+				fmt.Printf("- %s\n", strings.TrimPrefix(occurrence.File, *docsDir+"/"))
+			}
+		}
+		fmt.Println()
+	}
+
+	// Separate genuinely broken links from links that only failed due to rate limiting.
+	rateLimited := make(map[string]URLCheckResult)
+	actualBroken := make(map[string]URLCheckResult)
+	for url, result := range brokenLinks {
+		if result.StatusCode == 429 || result.ErrorMsg == "Rate limited (429)" {
+			rateLimited[url] = result
+		} else {
+			actualBroken[url] = result
+		}
+	}
+
+	fmt.Printf("Found %d broken links out of %d total links\n", len(actualBroken), len(uniqueURLs))
+	if len(rateLimited) > 0 {
+		fmt.Printf("Additionally, %d links were rate limited (429)\n", len(rateLimited))
+	}
+	fmt.Println("Done")
+
+	if len(actualBroken) > 0 && *exitOnError {
+		os.Exit(1)
+	} else if len(brokenLinks) == 0 {
+		fmt.Printf("Success: All %d links are valid\n", len(uniqueURLs))
+	}
+}
+
+// extractLinks walks docsDir, collects every GitHub link found in .adoc files,
+// and returns all occurrences plus the deduplicated list of URLs to check.
+func extractLinks(docsDir string) ([]URLOccurrence, []string) {
+	var occurrences []URLOccurrence
+	uniqueURLs := make(map[string]struct{})
+
+	githubLinkRegex := regexp.MustCompile(`https?://github\.com[-a-zA-Z0-9@:%._\+~#=/]*`)
+	issuesPRRegex := regexp.MustCompile(`/(?:issues|pull)/[0-9]+`)
+	componentsRegex := regexp.MustCompile(`/releases/(?:v|tag/v)?[0-9]+\.[0-9]+\.[0-9]+/.*-components\.ya?ml$`)
+
+	filepath.WalkDir(docsDir, func(path string, d fs.DirEntry, err error) error {
+		if err != nil || d.IsDir() || !strings.HasSuffix(path, ".adoc") {
+			return err
+		}
+		content, err := os.ReadFile(path)
+		if err != nil {
+			return err
+		}
+		for _, url := range githubLinkRegex.FindAllString(string(content), -1) {
+			cleaned := trimTrailingPunctuation(url)
+			if !issuesPRRegex.MatchString(cleaned) && !componentsRegex.MatchString(cleaned) { // ignore issue, PR, and release component YAML links
+				occurrences = append(occurrences, URLOccurrence{URL: cleaned, File: path})
+				uniqueURLs[cleaned] = struct{}{}
+			}
+		}
+		return nil
+	})
+
+	urls := make([]string, 0, len(uniqueURLs))
+	for url := range uniqueURLs {
+		urls = append(urls, url)
+	}
+
+	return occurrences, urls
+}
+
+// trimTrailingPunctuation strips punctuation that commonly trails a URL in prose.
+func trimTrailingPunctuation(url string) string {
+	return strings.TrimRight(url, ".,)]>")
+}
+
+// checkLinks checks all URLs with at most maxParallel requests in flight.
+func checkLinks(urls []string, maxParallel int, total int) []URLCheckResult {
+	resultsChan := make(chan URLCheckResult, len(urls))
+	sem := make(chan struct{}, maxParallel)
+	var wg sync.WaitGroup
+	var progress int32
+
+	// Without rate limiting, the checks quickly get blocked with HTTP 429 responses,
+	// so release one request token every 100ms.
+	rateLimiter := make(chan struct{}, 1)
+	go func() {
+		for {
+			rateLimiter <- struct{}{}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}()
+
+	client := &http.Client{
+		Timeout: 10 * time.Second,
+		CheckRedirect: func(req *http.Request, via []*http.Request) error {
+			return nil // follow redirects
+		},
+	}
+
+	for _, url := range urls {
+		wg.Add(1)
+		sem <- struct{}{}
+		go func(u string) {
+			defer wg.Done()
+			defer func() { <-sem }()
+			resultsChan <- checkLinkWithRetry(client, rateLimiter, u)
+			fmt.Fprintf(os.Stderr, "\rProgress: %d/%d links checked", atomic.AddInt32(&progress, 1), total)
+		}(url)
+	}
+
+	go func() {
+		wg.Wait()
+		close(resultsChan)
+	}()
+
+	var results []URLCheckResult
+	for result := range resultsChan {
+		results = append(results, result)
+	}
+
+	fmt.Fprintln(os.Stderr)
+	return results
+}
+
+// checkLinkWithRetry performs up to three attempts per URL, backing off
+// exponentially between attempts and waiting for a rate-limiter token
+// before each request.
+func checkLinkWithRetry(client *http.Client, rateLimiter <-chan struct{}, url string) URLCheckResult {
+	result := URLCheckResult{URL: url, IsValid: false}
+	for attempt := 0; attempt < 3; attempt++ {
+		if attempt > 0 {
+			sleep := time.Duration(1<<attempt) * time.Second
+			time.Sleep(sleep)
+		}
+		<-rateLimiter
+		resp, err := client.Get(url)
+		if err != nil {
+			result.ErrorMsg = err.Error()
+			continue
+		}
+		resp.Body.Close()
+		result.StatusCode = resp.StatusCode
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			result.IsValid = true
+			break
+		}
+		if resp.StatusCode == 429 {
+			if attempt < 2 {
+				continue
+			}
+			result.ErrorMsg = "Rate limited (429)"
+		} else {
+			break
+		}
+	}
+	return result
+}

From cfdf079b0f4398d6ebd707ae8a9870c4fd7a5a50 Mon Sep 17 00:00:00 2001
From: Alexandr Demicev
Date: Tue, 29 Apr 2025 17:36:46 +0200
Subject: [PATCH 2/2] Fix all broken links

Signed-off-by: Alexandr Demicev
---
 docs/next/modules/en/pages/security/slsa.adoc | 2 +-
 docs/next/modules/en/pages/user/clusters.adoc | 4 ++--
 .../modules/en/pages/reference-guides/providers/howto.adoc | 4 ++--
 .../pages/reference-guides/rancher-turtles-chart/values.adoc | 2 +-
 docs/v0.10/modules/en/pages/security/slsa.adoc | 2 +-
 .../modules/en/pages/reference-guides/providers/howto.adoc | 4 ++--
 .../pages/reference-guides/rancher-turtles-chart/values.adoc | 2 +-
 docs/v0.11/modules/en/pages/security/slsa.adoc | 2 +-
 .../modules/en/pages/reference-guides/providers/howto.adoc | 4 ++--
 .../pages/reference-guides/rancher-turtles-chart/values.adoc | 2 +-
 docs/v0.12/modules/en/pages/security/slsa.adoc | 2 +-
 .../modules/en/pages/reference-guides/providers/howto.adoc | 2 +-
 .../pages/reference-guides/rancher-turtles-chart/values.adoc | 2 +-
 docs/v0.13/modules/en/pages/security/slsa.adoc | 2 +-
 .../modules/en/pages/reference-guides/providers/howto.adoc | 2 +-
 .../pages/reference-guides/rancher-turtles-chart/values.adoc | 2 +-
 docs/v0.14/modules/en/pages/security/slsa.adoc | 2 +-
 .../modules/en/pages/reference-guides/providers/howto.adoc | 2 +-
 .../pages/reference-guides/rancher-turtles-chart/values.adoc | 2 +-
 docs/v0.15/modules/en/pages/security/slsa.adoc | 2 +-
 .../modules/en/pages/reference-guides/providers/howto.adoc | 2 +-
 docs/v0.16/modules/en/pages/security/slsa.adoc | 2 +-
 docs/v0.17/modules/en/pages/security/slsa.adoc | 2 +-
 docs/v0.17/modules/en/pages/user/clusters.adoc | 4 ++--
 docs/v0.18/modules/en/pages/security/slsa.adoc | 2 +-
 docs/v0.18/modules/en/pages/user/clusters.adoc | 4 ++--
 docs/v0.19/modules/en/pages/security/slsa.adoc | 2 +-
 docs/v0.19/modules/en/pages/user/clusters.adoc | 4 ++--
 28 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/docs/next/modules/en/pages/security/slsa.adoc b/docs/next/modules/en/pages/security/slsa.adoc
index dae5fe94..2961e3f1 100644
--- a/docs/next/modules/en/pages/security/slsa.adoc
+++ b/docs/next/modules/en/pages/security/slsa.adoc
@@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide
 * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners.
 * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool].
 * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub.
-* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure.
+* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure.
 * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets.
 
 == Isolation
diff --git a/docs/next/modules/en/pages/user/clusters.adoc b/docs/next/modules/en/pages/user/clusters.adoc
index cbb81a88..425a9577 100644
--- a/docs/next/modules/en/pages/user/clusters.adoc
+++ b/docs/next/modules/en/pages/user/clusters.adoc
@@ -204,7 +204,7 @@ AWS EC2 RKE2::
 +
 Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI.
 +
-We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters.
The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. + @@ -287,7 +287,7 @@ Docker Kubeadm:: vSphere RKE2:: + -Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/vmware[CAPRKE2 repository]. +Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/vmware[CAPRKE2 repository]. + To generate the YAML for the cluster, do the following: + diff --git a/docs/v0.10/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.10/modules/en/pages/reference-guides/providers/howto.adoc index 29bbc798..c6aebdcf 100644 --- a/docs/v0.10/modules/en/pages/reference-guides/providers/howto.adoc +++ b/docs/v0.10/modules/en/pages/reference-guides/providers/howto.adoc @@ -43,7 +43,7 @@ AWS RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. 
+ @@ -63,7 +63,7 @@ export AWS_REGION="aws-region" export AWS_AMI_ID="ami-id" clusterctl generate cluster cluster1 \ ---from https://github.com/rancher/cluster-api-provider-rke2/blob/main/samples/aws/internal/cluster-template.yaml \ +--from https://github.com/rancher/cluster-api-provider-rke2/blob/release-0.5/samples/aws/internal/cluster-template.yaml \ > cluster1.yaml ---- + diff --git a/docs/v0.10/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc b/docs/v0.10/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc index 63952fd2..c141bfc5 100644 --- a/docs/v0.10/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc +++ b/docs/v0.10/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc @@ -23,7 +23,7 @@ rancherTurtles: rancher-webhook: # an existing rancher installation keeps rancher webhooks after disabling embedded-capi cleanup: true # indicates that the remaining rancher webhooks be removed (default: true) kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.28.0 # indicates the image to use for pre-install cleanup (default: Kubernetes container image registry) - rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other + rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/release-1.5/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other label: true # indicates that the label will be added (default: true) managementv3-cluster: # rancher will use `clusters.management.cattle.io` to represent an imported capi cluster enabled: false # if false, indicates that `clusters.provisioning.cattle.io` resources will be used (default: false) diff --git a/docs/v0.10/modules/en/pages/security/slsa.adoc b/docs/v0.10/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.10/modules/en/pages/security/slsa.adoc +++ b/docs/v0.10/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. 
== Isolation diff --git a/docs/v0.11/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.11/modules/en/pages/reference-guides/providers/howto.adoc index 8094efa6..db55a641 100644 --- a/docs/v0.11/modules/en/pages/reference-guides/providers/howto.adoc +++ b/docs/v0.11/modules/en/pages/reference-guides/providers/howto.adoc @@ -43,7 +43,7 @@ AWS RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. + @@ -61,7 +61,7 @@ export AWS_SSH_KEY_NAME="aws-ssh-key" export AWS_REGION="aws-region" export AWS_AMI_ID="ami-id" clusterctl generate cluster cluster1 \ ---from https://github.com/rancher/cluster-api-provider-rke2/blob/main/samples/aws/internal/cluster-template.yaml \ +--from https://github.com/rancher/cluster-api-provider-rke2/blob/release-0.5/samples/aws/internal/cluster-template.yaml \ > cluster1.yaml ---- + diff --git a/docs/v0.11/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc b/docs/v0.11/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc index 53e75871..561dbdf8 100644 --- a/docs/v0.11/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc +++ b/docs/v0.11/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc @@ -23,7 +23,7 @@ rancherTurtles: rancher-webhook: # an existing rancher installation keeps rancher webhooks after disabling embedded-capi cleanup: true # indicates that the remaining rancher webhooks be removed (default: true) kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.30.0 # indicates the image to use for pre-install cleanup (default: Kubernetes container image registry) - rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other + rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. 
See https://github.com/kubernetes-sigs/cluster-api/blob/release-1.5/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other label: true # indicates that the label will be added (default: true) managementv3-cluster: # rancher will use `clusters.management.cattle.io` to represent an imported capi cluster enabled: false # if false, indicates that `clusters.provisioning.cattle.io` resources will be used (default: false) diff --git a/docs/v0.11/modules/en/pages/security/slsa.adoc b/docs/v0.11/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.11/modules/en/pages/security/slsa.adoc +++ b/docs/v0.11/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. == Isolation diff --git a/docs/v0.12/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.12/modules/en/pages/reference-guides/providers/howto.adoc index 8094efa6..db55a641 100644 --- a/docs/v0.12/modules/en/pages/reference-guides/providers/howto.adoc +++ b/docs/v0.12/modules/en/pages/reference-guides/providers/howto.adoc @@ -43,7 +43,7 @@ AWS RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. 
+ @@ -61,7 +61,7 @@ export AWS_SSH_KEY_NAME="aws-ssh-key" export AWS_REGION="aws-region" export AWS_AMI_ID="ami-id" clusterctl generate cluster cluster1 \ ---from https://github.com/rancher/cluster-api-provider-rke2/blob/main/samples/aws/internal/cluster-template.yaml \ +--from https://github.com/rancher/cluster-api-provider-rke2/blob/release-0.5/samples/aws/internal/cluster-template.yaml \ > cluster1.yaml ---- + diff --git a/docs/v0.12/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc b/docs/v0.12/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc index 53e75871..561dbdf8 100644 --- a/docs/v0.12/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc +++ b/docs/v0.12/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc @@ -23,7 +23,7 @@ rancherTurtles: rancher-webhook: # an existing rancher installation keeps rancher webhooks after disabling embedded-capi cleanup: true # indicates that the remaining rancher webhooks be removed (default: true) kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.30.0 # indicates the image to use for pre-install cleanup (default: Kubernetes container image registry) - rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other + rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/release-1.5/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other label: true # indicates that the label will be added (default: true) managementv3-cluster: # rancher will use `clusters.management.cattle.io` to represent an imported capi cluster enabled: false # if false, indicates that `clusters.provisioning.cattle.io` resources will be used (default: false) diff --git a/docs/v0.12/modules/en/pages/security/slsa.adoc b/docs/v0.12/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.12/modules/en/pages/security/slsa.adoc +++ b/docs/v0.12/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. 
== Isolation diff --git a/docs/v0.13/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.13/modules/en/pages/reference-guides/providers/howto.adoc index 752ede87..1da0aa57 100644 --- a/docs/v0.13/modules/en/pages/reference-guides/providers/howto.adoc +++ b/docs/v0.13/modules/en/pages/reference-guides/providers/howto.adoc @@ -43,7 +43,7 @@ AWS RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. + diff --git a/docs/v0.13/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc b/docs/v0.13/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc index 53e75871..561dbdf8 100644 --- a/docs/v0.13/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc +++ b/docs/v0.13/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc @@ -23,7 +23,7 @@ rancherTurtles: rancher-webhook: # an existing rancher installation keeps rancher webhooks after disabling embedded-capi cleanup: true # indicates that the remaining rancher webhooks be removed (default: true) kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.30.0 # indicates the image to use for pre-install cleanup (default: Kubernetes container image registry) - rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other + rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. 
See https://github.com/kubernetes-sigs/cluster-api/blob/release-1.5/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other label: true # indicates that the label will be added (default: true) managementv3-cluster: # rancher will use `clusters.management.cattle.io` to represent an imported capi cluster enabled: false # if false, indicates that `clusters.provisioning.cattle.io` resources will be used (default: false) diff --git a/docs/v0.13/modules/en/pages/security/slsa.adoc b/docs/v0.13/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.13/modules/en/pages/security/slsa.adoc +++ b/docs/v0.13/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. == Isolation diff --git a/docs/v0.14/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.14/modules/en/pages/reference-guides/providers/howto.adoc index 752ede87..1da0aa57 100644 --- a/docs/v0.14/modules/en/pages/reference-guides/providers/howto.adoc +++ b/docs/v0.14/modules/en/pages/reference-guides/providers/howto.adoc @@ -43,7 +43,7 @@ AWS RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. 
+ diff --git a/docs/v0.14/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc b/docs/v0.14/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc index 53e75871..561dbdf8 100644 --- a/docs/v0.14/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc +++ b/docs/v0.14/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc @@ -23,7 +23,7 @@ rancherTurtles: rancher-webhook: # an existing rancher installation keeps rancher webhooks after disabling embedded-capi cleanup: true # indicates that the remaining rancher webhooks be removed (default: true) kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.30.0 # indicates the image to use for pre-install cleanup (default: Kubernetes container image registry) - rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other + rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/release-1.5/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other label: true # indicates that the label will be added (default: true) managementv3-cluster: # rancher will use `clusters.management.cattle.io` to represent an imported capi cluster enabled: false # if false, indicates that `clusters.provisioning.cattle.io` resources will be used (default: false) diff --git a/docs/v0.14/modules/en/pages/security/slsa.adoc b/docs/v0.14/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.14/modules/en/pages/security/slsa.adoc +++ b/docs/v0.14/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. == Isolation diff --git a/docs/v0.15/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.15/modules/en/pages/reference-guides/providers/howto.adoc index 752ede87..1da0aa57 100644 --- a/docs/v0.15/modules/en/pages/reference-guides/providers/howto.adoc +++ b/docs/v0.15/modules/en/pages/reference-guides/providers/howto.adoc @@ -43,7 +43,7 @@ AWS RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. 
+ -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. + diff --git a/docs/v0.15/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc b/docs/v0.15/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc index 53e75871..561dbdf8 100644 --- a/docs/v0.15/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc +++ b/docs/v0.15/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc @@ -23,7 +23,7 @@ rancherTurtles: rancher-webhook: # an existing rancher installation keeps rancher webhooks after disabling embedded-capi cleanup: true # indicates that the remaining rancher webhooks be removed (default: true) kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.30.0 # indicates the image to use for pre-install cleanup (default: Kubernetes container image registry) - rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other + rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/release-1.5/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other label: true # indicates that the label will be added (default: true) managementv3-cluster: # rancher will use `clusters.management.cattle.io` to represent an imported capi cluster enabled: false # if false, indicates that `clusters.provisioning.cattle.io` resources will be used (default: false) diff --git a/docs/v0.15/modules/en/pages/security/slsa.adoc b/docs/v0.15/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.15/modules/en/pages/security/slsa.adoc +++ b/docs/v0.15/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. 
+* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. == Isolation diff --git a/docs/v0.16/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.16/modules/en/pages/reference-guides/providers/howto.adoc index 752ede87..1da0aa57 100644 --- a/docs/v0.16/modules/en/pages/reference-guides/providers/howto.adoc +++ b/docs/v0.16/modules/en/pages/reference-guides/providers/howto.adoc @@ -43,7 +43,7 @@ AWS RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. + diff --git a/docs/v0.16/modules/en/pages/security/slsa.adoc b/docs/v0.16/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.16/modules/en/pages/security/slsa.adoc +++ b/docs/v0.16/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. 
== Isolation diff --git a/docs/v0.17/modules/en/pages/security/slsa.adoc b/docs/v0.17/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.17/modules/en/pages/security/slsa.adoc +++ b/docs/v0.17/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. == Isolation diff --git a/docs/v0.17/modules/en/pages/user/clusters.adoc b/docs/v0.17/modules/en/pages/user/clusters.adoc index fa4fa490..3b9e8e37 100644 --- a/docs/v0.17/modules/en/pages/user/clusters.adoc +++ b/docs/v0.17/modules/en/pages/user/clusters.adoc @@ -291,7 +291,7 @@ AWS EC2 RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. + @@ -425,7 +425,7 @@ kubectl create -f cluster1.yaml vSphere RKE2:: + -Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/vmware[CAPRKE2 repository]. 
+Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/vmware[CAPRKE2 repository]. + To generate the YAML for the cluster, do the following: + diff --git a/docs/v0.18/modules/en/pages/security/slsa.adoc b/docs/v0.18/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.18/modules/en/pages/security/slsa.adoc +++ b/docs/v0.18/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. == Isolation diff --git a/docs/v0.18/modules/en/pages/user/clusters.adoc b/docs/v0.18/modules/en/pages/user/clusters.adoc index f82d59bd..a53f9e0e 100644 --- a/docs/v0.18/modules/en/pages/user/clusters.adoc +++ b/docs/v0.18/modules/en/pages/user/clusters.adoc @@ -258,7 +258,7 @@ AWS EC2 RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. 
+ @@ -398,7 +398,7 @@ kubectl create -f cluster1.yaml vSphere RKE2:: + -Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/vmware[CAPRKE2 repository]. +Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/vmware[CAPRKE2 repository]. + To generate the YAML for the cluster, do the following: + diff --git a/docs/v0.19/modules/en/pages/security/slsa.adoc b/docs/v0.19/modules/en/pages/security/slsa.adoc index dae5fe94..2961e3f1 100644 --- a/docs/v0.19/modules/en/pages/security/slsa.adoc +++ b/docs/v0.19/modules/en/pages/security/slsa.adoc @@ -46,7 +46,7 @@ https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guide * The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners. * The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]. * The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub. -* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure. +* The provenance signing secrets are ephemeral and are generated through Sigstore's https://docs.sigstore.dev/cosign/signing/overview/[keyless] signing procedure. * The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines than the build and release process, so that the {product_name} build scripts don't have access to the signing secrets. == Isolation diff --git a/docs/v0.19/modules/en/pages/user/clusters.adoc b/docs/v0.19/modules/en/pages/user/clusters.adoc index cbb81a88..425a9577 100644 --- a/docs/v0.19/modules/en/pages/user/clusters.adoc +++ b/docs/v0.19/modules/en/pages/user/clusters.adoc @@ -204,7 +204,7 @@ AWS EC2 RKE2:: + Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI. + -We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. 
The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider. +We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. + We will use the `internal` one for this guide, however the same steps apply for `external`. + @@ -287,7 +287,7 @@ Docker Kubeadm:: vSphere RKE2:: + -Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/vmware[CAPRKE2 repository]. +Before creating a vSphere+RKE2 workload cluster, it is required to have a VM template with the necessary RKE2 binaries and dependencies. The template should already include RKE2 binaries if operating in an air-gapped environment, following the https://docs.rke2.io/install/airgap#tarball-method[tarball method]. You can find additional configuration details in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/examples/templates/vmware[CAPRKE2 repository]. + To generate the YAML for the cluster, do the following: +