diff --git a/.gitignore b/.gitignore index 3f9ee1f..6f26910 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,9 @@ website/build .vagrant/ Vagrantfile +# Configs +*.hcl + .DS_Store .idea .vscode @@ -78,6 +81,10 @@ tmp/ scripts/custom.sh +# enos +/enos/.enos/* +/enos/enos*.vars.hcl + **/.terraform/* .terraform.lock.hcl *.tfstate diff --git a/Makefile b/Makefile index 0a2d43d..c9d51d0 100644 --- a/Makefile +++ b/Makefile @@ -7,9 +7,69 @@ ifndef $(GOPATH) GOPATH=$(shell go env GOPATH) export GOPATH endif -PLUGIN_DIR ?= $$GOPATH/vault-plugins +PLUGIN_DIR ?= $(GOPATH)/vault-plugins PLUGIN_PATH ?= local-secrets-ldap +# env vars + +#setup ldap server: +LDAP_DOMAIN ?= example.com +LDAP_ORG ?= example +LDAP_ADMIN_PW ?= adminpassword +IMAGE_TAG ?= 1.3.0 +LDAP_HOST ?= 127.0.0.1 +LDAP_PORT ?= 389 +LDIF_PATH ?= $(PWD)/bootstrap/ldif/seed.ldif + +#configure ldap plugin +MAKEFILE_DIR ?= $(PWD) +PLUGIN_SOURCE_TYPE ?= local_build +PLUGIN_DIR_VAULT ?= /etc/vault/plugins +LDAP_URL ?= ldap://127.0.0.1:389 +LDAP_BIND_DN ?= cn=admin,dc=example,dc=com +LDAP_BIND_PASS ?= adminpassword +LDAP_USER_DN ?= ou=users,dc=example,dc=com +LDAP_SCHEMA ?= openldap + +#plugin endpoints tests +ROTATION_PERIOD ?= 10 +ROTATION_WINDOW ?= 3600 +LDAP_DN ?= uid=mary.smith,ou=users,dc=example,dc=com +LDAP_USERNAME ?= mary.smith +LDAP_OLD_PASSWORD ?= defaultpassword +LDIF_PATH ?= $(PWD)/enos/modules/dynamic_role_crud_api/ldif +LDAP_BASE_DN ?= dc=example,dc=com +LIBRARY_SET_NAME ?= staticuser bob.johnson mary.smith +SERVICE_ACCOUNT_NAMES ?= dev-team + +export LDAP_DOMAIN +export LDAP_ORG +export LDAP_ADMIN_PW +export IMAGE_TAG +export LDAP_PORT +export PLUGIN_DIR +export PLUGIN_NAME +export PLUGIN_PATH +export PLUGIN_SOURCE_TYPE +export MAKEFILE_DIR +export PLUGIN_DIR_VAULT +export LDAP_URL +export LDAP_BIND_DN +export LDAP_BIND_PASS +export LDAP_USER_DN +export LDAP_SCHEMA +export LDIF_PATH +export LDAP_HOST +export ROTATION_PERIOD +export ROTATION_WINDOW +export LDAP_DN +export LDAP_USERNAME +export 
LDAP_OLD_PASSWORD +export LDIF_PATH +export LDAP_BASE_DN +export LIBRARY_SET_NAME +export SERVICE_ACCOUNT_NAMES + .PHONY: default default: dev @@ -48,8 +108,58 @@ fmtcheck: fmt: gofumpt -l -w . -configure: dev - ./bootstrap/configure.sh \ - $(PLUGIN_DIR) \ - $(PLUGIN_NAME) \ - $(PLUGIN_PATH) +.PHONY: setup-env +setup-env: + cd bootstrap && ./setup-openldap.sh + +.PHONY: plugin-build +plugin-build: + cd enos/modules/build_local && ./scripts/plugin-build.sh + +.PHONY: plugin-register +plugin-register: + cd enos/modules/setup_plugin && \ + PLUGIN_BINARY_SRC="$(PLUGIN_DIR)/$(PLUGIN_NAME)" ./scripts/plugin-register.sh + +.PHONY: plugin-enable +plugin-enable: + cd enos/modules/setup_plugin && ./scripts/plugin-enable.sh + +.PHONY: plugin-configure +plugin-configure: + cd enos/modules/configure_plugin/ldap && ./scripts/plugin-configure.sh + +.PHONY: configure +configure: plugin-build plugin-register plugin-enable plugin-configure + +.PHONY: teardown-env +teardown-env: + cd bootstrap && ./teardown-env.sh + +.PHONY: manual-root-rotation-test +manual-root-rotation-test: + cd enos/modules/root_rotation_manual && ./scripts/test-root-rotation-manual.sh + +.PHONY: periodic-root-rotation-test +periodic-root-rotation-test: + cd enos/modules/root_rotation_period && ./scripts/test-root-rotation-period.sh + +.PHONY: scheduled-root-rotation-test +scheduled-root-rotation-test: + cd enos/modules/root_rotation_schedule && ./scripts/test-root-rotation-schedule.sh + +.PHONY: static-role-test +static-role-test: + ROLE_NAME=mary cd enos/modules/static_role_crud_api && ./scripts/static-role.sh + +.PHONY: dynamic-role-test +dynamic-role-test: + ROLE_NAME=adam cd enos/modules/dynamic_role_crud_api && ./scripts/dynamic-role.sh + +.PHONY: library-test +library-test: + cd enos/modules/library_crud_api && ./scripts/library.sh + +.PHONY: teardown-env +teardown-env: + cd bootstrap && ./teardown-env.sh diff --git a/bootstrap/ldif/seed.ldif b/bootstrap/ldif/seed.ldif new file mode 100644 index 
0000000..ec7371f --- /dev/null +++ b/bootstrap/ldif/seed.ldif @@ -0,0 +1,41 @@ +# Define Organizational Units +dn: ou=groups,dc=example,dc=com +objectClass: organizationalUnit +ou: groups + +dn: ou=users,dc=example,dc=com +objectClass: organizationalUnit +ou: users + +dn: cn=dev,ou=groups,dc=example,dc=com +objectClass: groupOfUniqueNames +cn: dev +uniqueMember: cn=staticuser,ou=users,dc=example,dc=com +uniqueMember: cn=bob.johnson,ou=users,dc=example,dc=com +uniqueMember: cn=mary.smith,ou=users,dc=example,dc=com +description: Development group + +# Add users for static role rotation +dn: uid=staticuser,ou=users,dc=example,dc=com +objectClass: inetOrgPerson +cn: staticuser +sn: staticuser +uid: staticuser +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: defaultpassword + +dn: uid=bob.johnson,ou=users,dc=example,dc=com +objectClass: inetOrgPerson +cn: bob.johnson +sn: bob.johnson +uid: bob.johnson +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: defaultpassword + +dn: uid=mary.smith,ou=users,dc=example,dc=com +objectClass: inetOrgPerson +cn: mary.smith +sn: mary.smith +uid: mary.smith +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: defaultpassword \ No newline at end of file diff --git a/bootstrap/setup-openldap.sh b/bootstrap/setup-openldap.sh new file mode 100755 index 0000000..7b8e326 --- /dev/null +++ b/bootstrap/setup-openldap.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$LDAP_DOMAIN" ]] && fail "LDAP_DOMAIN env variable has not been set" +[[ -z "$LDAP_ORG" ]] && fail "LDAP_ORG env variable has not been set" +[[ -z "$LDAP_ADMIN_PW" ]] && fail "LDAP_ADMIN_PW env variable has not been set" +[[ -z "$IMAGE_TAG" ]] && fail "IMAGE_TAG env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDIF_PATH" ]] && fail "LDIF_PATH env variable has not been set" + +LDAP_HOSTNAME="${LDAP_HOSTNAME:-openldap}" + +# Determine container runtime: prefer podman if installed, allow override via CONTAINER_RUNTIME +if [[ -n "$CONTAINER_RUNTIME" ]]; then + RUNTIME="$CONTAINER_RUNTIME" +elif command -v podman >/dev/null 2>&1; then + RUNTIME="sudo podman" +else + RUNTIME="sudo docker" +fi + +echo "Using container runtime: $RUNTIME" + +# Pulling image +echo "Pulling image: ${LDAP_DOCKER_NAME}" +LDAP_DOCKER_NAME="docker.io/osixia/openldap:${IMAGE_TAG}" +${RUNTIME} pull "${LDAP_DOCKER_NAME}" + +# Run OpenLDAP container +echo "Starting OpenLDAP container..." +${RUNTIME} run -d \ + --name openldap \ + --hostname "${LDAP_HOSTNAME}" \ + -p "${LDAP_PORT}:${LDAP_PORT}" \ + -p 1636:636 \ + -e LDAP_ORGANISATION="${LDAP_ORG}" \ + -e LDAP_DOMAIN="${LDAP_DOMAIN}" \ + -e LDAP_ADMIN_PASSWORD="${LDAP_ADMIN_PW}" \ + "${LDAP_DOCKER_NAME}" + +echo "OpenLDAP server is now running in container!" + +# Wait for the container to be up and running +echo "Waiting for OpenLDAP to start..." +sleep 5 + +# Check container status +status=$(${RUNTIME} ps --filter name=openldap --format "{{.Status}}") +if [[ -n "$status" ]]; then + echo "OpenLDAP container is running. Status: $status" +else + echo "OpenLDAP container is NOT running!" 
+ echo "Check logs with: ${RUNTIME} logs openldap" + exit 1 +fi + +# Run ldapadd inside the container +${RUNTIME} exec -i openldap ldapadd -x -w "${LDAP_ADMIN_PW}" -D "cn=admin,dc=${LDAP_DOMAIN//./,dc=}" -f /dev/stdin < "${LDIF_PATH}" diff --git a/bootstrap/teardown-env.sh b/bootstrap/teardown-env.sh new file mode 100755 index 0000000..ac73f86 --- /dev/null +++ b/bootstrap/teardown-env.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" +[[ -z "$PLUGIN_DIR" ]] && fail "PLUGIN_DIR env variable has not been set" + +MAKEFILE_DIR="${MAKEFILE_DIR:-$(pwd)}" +PROJECT_BIN_DIR="${MAKEFILE_DIR}/bin" + +echo "[teardown] Stopping and removing openldap docker container if it exists..." +docker rm -f openldap 2>/dev/null || echo "[teardown] No openldap container found." + +# Remove from bin directory +if [ -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[teardown] Removing existing plugin at ${PROJECT_BIN_DIR}/${PLUGIN_NAME}" + rm -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" +fi + +# Remove from destination directory +if [ -f "${PLUGIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[teardown] Removing existing plugin at ${PLUGIN_DIR}/${PLUGIN_NAME}" + rm -f "${PLUGIN_DIR}/${PLUGIN_NAME}" +fi + +echo "[teardown] Teardown complete." \ No newline at end of file diff --git a/enos/Makefile b/enos/Makefile new file mode 100644 index 0000000..bff493f --- /dev/null +++ b/enos/Makefile @@ -0,0 +1,48 @@ +VAULT_VERSION=$$(cat $(CURDIR)/../version/VERSION) + +.PHONY: default +default: check-fmt shellcheck + +.PHONY: check-fmt +check-fmt: check-fmt-enos check-fmt-modules check-shfmt + +.PHONY: fmt +fmt: fmt-enos fmt-modules shfmt + +.PHONY: check-fmt-enos +check-fmt-enos: + enos fmt --check --diff . + enos fmt --check --diff ./k8s + +.PHONY: fmt-enos +fmt-enos: + enos fmt . 
+ enos fmt ./k8s + +.PHONY: check-fmt-modules +check-fmt-modules: + terraform fmt -check -diff -recursive ./modules + +.PHONY: fmt-modules +fmt-modules: + terraform fmt -diff -recursive ./modules + +.PHONY: validate-enos +validate-enos: + enos scenario validate --timeout 30m0s --chdir ./k8s + enos scenario validate --timeout 30m0s + +.PHONY: lint +lint: check-fmt check-fmt-modules check-shfmt shellcheck validate-enos + +.PHONY: shellcheck +shellcheck: + find ./modules/ -type f -name '*.sh' | xargs shellcheck + +.PHONY: shfmt +shfmt: + find ./modules/ -type f -name '*.sh' | xargs shfmt -l -w -i 2 -bn -ci -kp -sr + +.PHONY: check-shfmt +check-shfmt: + find ./modules/ -type f -name '*.sh' | xargs shfmt -l -d -i 2 -bn -ci -kp -sr diff --git a/enos/README.md b/enos/README.md new file mode 100644 index 0000000..83608b5 --- /dev/null +++ b/enos/README.md @@ -0,0 +1,134 @@ +# Enos + +Enos is a quality testing framework that allows composing and executing quality +requirement scenarios as code. For the OpenLDAP secrets engine Vault plugin, +scenarios are currently executable from a developer machine that has the requisite dependencies +and configuration. Future plans include executing scenarios via Github Actions. + +Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) +for further information regarding installation, execution or composing Enos scenarios. + +## Requirements +- AWS access. HashiCorp Vault developers should use Doormat. +- Terraform >= 1.7 +- Enos >= v0.4.0. You can [download a release](https://github.com/hashicorp/enos/releases/) or + install it with Homebrew: + ```shell + brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos + ``` +- An SSH keypair in the AWS region you wish to run the scenario. You can use + Doormat to log in to the AWS console to create or upload an existing keypair. 
+- A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants or from Artifactory when using `artifact_source:artifactory`. +- An OpenLDAP plugin artifact is downloaded from releases when using the `ldap_artifact_source:releases`, from Artifactory when using `ldap_artifact_source:artifactory`, and is built locally from the current branch when using `ldap_artifact_source:local` variant. + +## Scenario Variables +For local execution you can specify all the required variables using environment +variables, or you can update `enos.vars.hcl` with values and uncomment the lines. + +Variables that are required (include): +* `aws_ssh_keypair_name` +* `aws_ssh_private_key_path` +* `vault_bundle_path` +* `vault_license_path` (only required for non-OSS editions) +* `plugin_name` +* `plugin_dir_vault` +* `ldap_bind_pass` +* `ldap_schema` +* `ldap_tag` +* `ldap_base_dn` +* `ldap_user_role_name` +* `ldap_username` +* `ldap_user_old_password` +* `ldap_dynamic_user_role_name` +* `ldap_dynamic_role_ldif_templates_path` +* `ldap_library_set_name` +* `ldap_service_account_names` + +See [enos.vars.hcl](template_enos.vars.hcl) or [enos-variables.hcl](./enos-variables.hcl) +for further descriptions of the variables. + +Additional variable information can also be found in the [Scenario Outlines](#scenario_outlines) + +**[Future Work]** In CI, each scenario should be executed via Github Actions and should be configured using +environment variable inputs that follow the `ENOS_VAR_varname` pattern. + +## Scenario Outlines +Enos is capable of producing an outline of each scenario that is defined in a given directory. These +scenarios often include a description of what behavior the scenario performs, which variants are +available, and which variables are required. They also provide a step by step breakdown including +which quality requirements are verified by a given step. + +You can generate outlines of all scenarios or specify one via its name.
+ +From the `enos` directory: +```bash +enos scenario outline openldap_smoke +``` + +There are also HTML versions available for an improved reading experience: +```bash +enos scenario outline --format html > index.html +open index.html +``` + +## Executing Scenarios +From the `enos` directory: + +```bash +# List all available scenarios +enos scenario list +# Run the smoke or restart scenario with a Vault artifact from Artifactory and an +# openLDAP secrets engine plugin artifact that is built locally. +# Make sure the local machine has been configured as detailed in the requirements section. +# This will execute the scenario and clean up any resources if successful. +enos scenario run openldap_smoke artifact_source:artifactory ldap_artifact_source:local +enos scenario run openldap_restart artifact_source:artifactory ldap_artifact_source:local +# To run a specific variant of a scenario, you can specify the variant values. +enos scenario run openldap_smoke arch:amd64 artifact_source:artifactory artifact_type:package config_mode:env \ + distro:amzn edition:ent ip_version:4 seal:shamir ldap_artifact_source:local ldap_config_root_rotation_method:manual +# Launch an individual scenario but leave infrastructure up after execution +enos scenario launch openldap_smoke artifact_source:artifactory ldap_artifact_source:local +# Check an individual scenario for validity. This is useful during scenario +# authoring and debugging. +enos scenario validate openldap_smoke artifact_source:artifactory ldap_artifact_source:local +# If you've run the tests and desire to see the outputs, such as the URL or +# credentials, you can run the output command to see them. Please note that +# after "run" or destroy there will be no "outputs" as the infrastructure +# will have been destroyed and state cleared. 
+enos scenario output openldap_smoke artifact_source:artifactory ldap_artifact_source:local +# Explicitly destroy all existing infrastructure +enos scenario destroy openldap_smoke artifact_source:artifactory ldap_artifact_source:local +``` + +Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) +for further information regarding installation, execution or composing scenarios. + +# Variants +Both scenarios support a matrix of variants. + +## `ldap_artifact_source:local` +This variant is for running the Enos scenario locally. It builds the plugin binary +from the current branch, placing the binary at the `ldap_artifact_path`. + +## `ldap_artifact_source:releases` +This variant is for running the Enos scenario to test an artifact from HashiCorp releases. It requires following Enos variables to be set: +* `ldap_plugin_version` +* `ldap_revision` + +## `ldap_artifact_source:artifactory` +This variant is for running the Enos scenario to test an artifact from Artifactory. It requires following Enos variables to be set: +* `artifactory_username` +* `artifactory_token` +* `aws_ssh_keypair_name` +* `aws_ssh_private_key_path` +* `ldap_plugin_version` +* `ldap_revision` +* `ldap_artifactory_repo` + +Refer to the **Variants** section in the [Vault README on GitHub](https://github.com/hashicorp/vault/blob/main/README.md). +for further information regarding Vault's `artifact_source` matrix variants.
+Note: `artifact_source:local` isn't supported in this project since we never build Vault locally. + +**[Future Work]** In order to achieve broad coverage while +keeping test run time reasonable, the variants executed by the `enos-run` Github +Actions (CI) should use `enos scenario sample` to maximize variant distribution per scenario. \ No newline at end of file diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl new file mode 100644 index 0000000..e99ed00 --- /dev/null +++ b/enos/enos-descriptions.hcl @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +globals { + description = { + bootstrap_vault_cluster_targets = <<-EOF + Installs bootstrap tools (e.g. shasum) on the Vault cluster targets. + EOF + + build_vault = <<-EOF + Determine which Vault artifact we want to use for the scenario. Depending on the + 'artifact_source' variant we'll either build Vault from the local branch, fetch a candidate + build from Artifactory, or use a local artifact that was built in CI via CRT. + EOF + + build_ldap = <<-EOF + Determine which openldap plugin artifact we want to use for the scenario. Depending on the + 'artifact_source' variant we'll either build openldap secrets engine plugin from the local branch or + fetch a candidate build from Artifactory. + EOF + + configure_plugin = <<-EOF + Configure the Vault plugin. + EOF + + create_ldap_server = <<-EOF + Sets up the docker container and ldap server. + EOF + + create_ldap_server_target = <<-EOF + Create the target machines that we'll setup the LDAP server onto. + EOF + + create_seal_key = <<-EOF + Create the necessary seal key infrastructure for Vaults auto-unseal functionality. Depending + on the 'seal' variant this step will perform different actions. When using 'shamir' the step + is a no-op as we won't require an external seal mechanism. When using 'pkcs11' this step will + create a SoftHSM slot and associated token which can be distributed to all target nodes. 
When + using 'awskms' a new AWSKMS key will be created. The necessary security groups and policies + for Vault target nodes to access the AWSKMS key are handled in the target modules. + EOF + + create_vault_cluster = <<-EOF + Create the Vault cluster. In this module we'll install, configure, start, initialize and + unseal all the nodes in the Vault. After initialization it also enables various audit engines. + EOF + + create_vault_cluster_targets = <<-EOF + Create the target machines that we'll install Vault onto. We also handle creating AWS instance + profiles and security groups that allow for auto-discovery via the retry_join functionality in + Consul. The security group firewall rules will automatically allow SSH access from the host + external IP address of the machine executing Enos, in addition to all of the required ports + for Vault to function and be accessible in the VPC. + Note: Consul is not supported for plugin testing with enos. + EOF + + create_vpc = <<-EOF + Create an AWS VPC, internet gateway, default security group, and default subnet that allows + egress traffic via the internet gateway. + EOF + + dynamic_role_crud_api = <<-EOF + Tests the lifecycle of a dynamic role via the Vault CRUD API. + EOF + + ec2_info = <<-EOF + Query various endpoints in AWS Ec2 to gather metadata we'll use later in our run when creating + infrastructure for the Vault cluster. This metadata includes: + - AMI IDs for different Linux distributions and platform architectures + - Available Ec2 Regions + - Availability Zones for our desired machine instance types + EOF + + get_local_metadata = <<-EOF + Performs several Vault quality verifications that are dynamically modified based on the Vault + binary version, commit SHA, build-date (commit SHA date), and edition metadata. When we're + testing existing artifacts this expected metadata is passed in via Enos variables.
When we're + building a local artifact by using the 'artifact_source:local' variant, this step executes and + populates the expected metadata with that of our branch so that we don't have to update the + Enos variables on each commit. + EOF + + get_vault_cluster_ip_addresses = <<-EOF + Map the public and private IP addresses of the Vault cluster nodes and segregate them by + their leader status. This allows us to easily determine the public IP addresses of the leader + and follower nodes. + EOF + + library_crud_api = <<-EOF + Tests the lifecycle of a library set via the Vault CRUD API. + EOF + + read_vault_license = <<-EOF + When deploying Vault Enterprise, ensure a Vault Enterprise license is present on disk and + read its contents so that we can utilize it when configuring the Vault Enterprise cluster. + The 'edition' variant must be set to an Enterprise edition. + EOF + + restart_all_vault_nodes = <<-EOF + Restart all Vault nodes in the cluster. This is useful for testing the Vault cluster's + resilience to node restarts and ensuring that the cluster can recover and maintain its state. + EOF + + static_role_crud_api = <<-EOF + Tests the lifecycle of a static role via the Vault CRUD API. + EOF + + setup_plugin = <<-EOF + Build, register, and enable the Vault plugin. + EOF + + ldap_config_root_rotation = <<-EOF + Test the LDAP secrets engine's config endpoint root rotation functionality. + EOF + + unseal_vault = <<-EOF + Unseal the Vault cluster using the configured seal mechanism. + EOF + + vault_leader_step_down = <<-EOF + Force the Vault cluster leader to step down which forces the Vault cluster to perform a leader + election. + EOF + + verify_log_secrets = <<-EOF + Verify that the vault audit log and systemd journal do not leak secret values. + EOF + + verify_raft_cluster_all_nodes_are_voters = <<-EOF + When configured with a 'backend:raft' variant, verify that all nodes in the cluster are + healthy and are voters.
+ EOF + + verify_vault_sealed = <<-EOF + Verify that the Vault cluster has successfully sealed. + EOF + + verify_vault_unsealed = <<-EOF + Verify that the Vault cluster has successfully unsealed. + EOF + + verify_vault_version = <<-EOF + Verify that the Vault CLI has the correct embedded version metadata and that the Vault Cluster + version history includes our expected version. The CLI metadata that is validated includes + the Vault version, edition, build date, and any special prerelease metadata. + EOF + + wait_for_cluster_to_have_leader = <<-EOF + Wait for a leader election to occur before we proceed with any further quality verification. + EOF + + } +} diff --git a/enos/enos-dev-variables.hcl b/enos/enos-dev-variables.hcl new file mode 100644 index 0000000..f50a39b --- /dev/null +++ b/enos/enos-dev-variables.hcl @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "dev_build_local_ui" { + type = bool + description = "Whether or not to build the web UI when using the local builder var. If the assets have already been built we'll still include them" + default = false +} + +variable "dev_config_mode" { + type = string + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" // or "env" +} diff --git a/enos/enos-dynamic-config.hcl b/enos/enos-dynamic-config.hcl new file mode 100644 index 0000000..0aabdf1 --- /dev/null +++ b/enos/enos-dynamic-config.hcl @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +# Code generated by pipeline generate enos-dynamic-config DO NOT EDIT. + +# This file is overwritten in CI as it contains branch specific and sometimes ever-changing values.
+# It's checked in here so that enos samples and scenarios can be performed, just be aware that this +# might change out from under you. + +globals { + sample_attributes = { + aws_region = ["us-east-1", "us-west-2"] + distro_version_amzn = ["2023"] + distro_version_leap = ["15.6"] + distro_version_rhel = ["8.10", "9.5"] + distro_version_sles = ["15.6"] + distro_version_ubuntu = ["20.04", "24.04"] + upgrade_initial_version = ["1.17.0", "1.17.1", "1.17.2", "1.17.3", "1.17.4", "1.17.5", "1.17.6", "1.18.0-rc1", "1.18.0", "1.18.1", "1.18.2", "1.18.3", "1.18.4", "1.18.5", "1.19.0-rc1", "1.19.0", "1.19.1", "1.19.2"] + } +} diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl new file mode 100644 index 0000000..ac2e0bc --- /dev/null +++ b/enos/enos-globals.hcl @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +globals { + archs = ["amd64"] + artifact_sources = ["local", "crt", "artifactory"] + ldap_artifact_sources = ["local", "releases", "artifactory"] + ldap_config_root_rotation_methods = ["period", "schedule", "manual"] + artifact_types = ["bundle", "package"] + backends = ["raft"] + backend_tag_key = "VaultStorage" + build_tags = { + "ce" = ["ui"] + "ent" = ["ui", "enterprise", "ent"] + "ent.fips1403" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_3", "ent.fips1403"] + "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] + "ent.hsm.fips1403" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_3", "ent.hsm.fips1403"] + } + config_modes = ["env", "file"] + distros = ["amzn", "ubuntu"] + // Different distros may require different packages, or use different aliases for the same package + distro_packages = { + amzn = { + "2" = ["nc", "openldap-clients", "perl-Digest-SHA"] + "2023" = ["nc", "openldap-clients", "perl-Digest-SHA"] + } + ubuntu = { + "22.04" = ["netcat", "ldap-utils", "perl"] + "24.04" = ["netcat-openbsd", "ldap-utils", "perl"] + } + } + distro_version = { + amzn = var.distro_version_amzn + ubuntu = 
var.distro_version_ubuntu + } + editions = ["ce", "ent", "ent.fips1403", "ent.hsm", "ent.hsm.fips1403"] + enterprise_editions = [for e in global.editions : e if e != "ce"] + ip_versions = ["4", "6"] + package_manager = { + "amzn" = "yum" + "ubuntu" = "apt" + } + packages = ["jq"] + // Ports that we'll open up for ingress in the security group for all target machines. + // Port protocol maps to the IpProtocol schema: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html + ports = { + ssh : { + description = "SSH" + port = 22 + protocol = "tcp" + }, + ldap : { + description = "LDAP" + port = 389 + protocol = "tcp" + }, + ldaps : { + description = "LDAPS" + port = 636 + protocol = "tcp" + }, + vault_agent : { + description = "Vault Agent" + port = 8100 + protocol = "tcp" + }, + vault_proxy : { + description = "Vault Proxy" + port = 8101 + protocol = "tcp" + }, + vault_listener : { + description = "Vault Addr listener" + port = 8200 + protocol = "tcp" + }, + vault_cluster : { + description = "Vault Cluster listener" + port = 8201 + protocol = "tcp" + }, + } + seals = ["awskms", "pkcs11", "shamir"] + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + vault_install_dir = { + bundle = "/opt/vault/bin" + package = "/usr/bin" + } + vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "vault-cluster" + ldap_tag_key = "ldap-server-cluster" + vault_disable_mlock = false +} diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl new file mode 100644 index 0000000..50e923d --- /dev/null +++ b/enos/enos-modules.hcl @@ -0,0 +1,225 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +module "backend_raft" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/backend_raft?ref=${var.vault_repo_ref}" +} + +// Bootstrap Vault cluster targets +module "bootstrap_vault_cluster_targets" { + source = "./modules/ec2_bootstrap_tools" +} + +// Find any artifact in Artifactory. Requires the version, revision, and edition. +module "build_vault_artifactory" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_artifact?ref=${var.vault_repo_ref}" +} + +// Find any artifact in Artifactory. Requires the version, revision, and edition. +module "build_ldap_artifactory" { + source = "./modules/build_artifactory_artifact" +} + +// Find any released RPM or Deb in Artifactory. Requires the version, edition, distro, and distro +// version. +module "build_vault_artifactory_package" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_package?ref=${var.vault_repo_ref}" +} + +// A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle +// from releases.hashicorp.com. When using a local pre-built artifact it requires the local +// artifact path. When using a release zip it does nothing as you'll need to configure the +// vault_cluster module with release info instead. +module "build_vault_crt" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_crt?ref=${var.vault_repo_ref}" +} + +// A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle +// from releases.hashicorp.com. When using a local pre-built artifact it requires the local +// artifact path. When using a release zip it does nothing as you'll need to configure the +// vault_cluster module with release info instead. +module "build_ldap_releases" { + source = "./modules/build_releases" +} + +// Build the local branch and package it into a zip artifact. 
Requires the goarch, goos, build tags, +// and bundle path. +module "build_ldap_local" { + source = "./modules/build_local" +} + +// Configure the Vault plugin +module "configure_plugin" { + source = "./modules/configure_plugin/ldap" +} + +// Setup Docker and OpenLDAP on backend server with seed data +module "create_backend_server" { + source = "./modules/backend_servers_setup" +} + +module "create_vpc" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/create_vpc?ref=${var.vault_repo_ref}" + + environment = "ci" + common_tags = var.tags +} + +module "dynamic_role_crud_api" { + source = "./modules/dynamic_role_crud_api" +} + +module "ec2_info" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/ec2_info?ref=${var.vault_repo_ref}" +} + +module "get_local_metadata" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/get_local_metadata?ref=${var.vault_repo_ref}" +} + +module "read_license" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/read_license?ref=${var.vault_repo_ref}" +} + +module "replication_data" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/replication_data?ref=${var.vault_repo_ref}" +} + +module "restart_vault" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/restart_vault?ref=${var.vault_repo_ref}" + vault_install_dir = var.vault_install_dir +} + +module "root_rotation_period" { + source = "./modules/root_rotation_period" +} + +module "root_rotation_schedule" { + source = "./modules/root_rotation_schedule" +} + +module "root_rotation_manual" { + source = "./modules/root_rotation_manual" +} + +module "seal_awskms" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_awskms?ref=${var.vault_repo_ref}" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_shamir" { + source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/seal_shamir?ref=${var.vault_repo_ref}" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_pkcs11" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_pkcs11?ref=${var.vault_repo_ref}" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_vault" { + source = "./modules/seal_vault" +} + +// Register, and enable the Vault plugin +module "setup_plugin" { + source = "./modules/setup_plugin" +} + +module "static_role_crud_api" { + source = "./modules/static_role_crud_api" +} + +module "library_crud_api" { + source = "./modules/library_crud_api" +} + +// create target instances using ec2:RunInstances +module "target_ec2_instances" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_instances?ref=${var.vault_repo_ref}" + + common_tags = var.tags + ports_ingress = values(global.ports) + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +// don't create instances but satisfy the module interface +module "target_ec2_shim" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_shim?ref=${var.vault_repo_ref}" + + common_tags = var.tags + ports_ingress = values(global.ports) + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +module "vault_cluster" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_cluster?ref=${var.vault_repo_ref}" + + install_dir = var.vault_install_dir + consul_license = null + cluster_tag_key = global.vault_tag_key + log_level = var.vault_log_level +} + +module "vault_get_cluster_ips" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_get_cluster_ips?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} + +module "vault_step_down" { + source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/vault_step_down?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} + +module "vault_unseal_replication_followers" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_unseal_replication_followers?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_cluster_sealed" { + source = "./modules/vault_wait_for_cluster_sealed" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_cluster_unsealed" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_cluster_unsealed?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} + +module "verify_log_secrets" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_log_secrets?ref=${var.vault_repo_ref}" + + radar_license_path = var.vault_radar_license_path != null ? abspath(var.vault_radar_license_path) : null +} + +module "vault_verify_raft_auto_join_voter" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_raft_auto_join_voter?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir + vault_cluster_addr_port = global.ports["vault_cluster"]["port"] +} + +module "vault_verify_version" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_version?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_leader" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_leader?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl new file mode 100644 index 0000000..ab745b4 --- /dev/null +++ b/enos/enos-providers.hcl @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +provider "aws" "default" { + region = var.aws_region +} + +// This default SSH user is used in RHEL, Amazon Linux, SUSE, and Leap distros +provider "enos" "ec2_user" { + transport = { + ssh = { + user = "ec2-user" + private_key_path = abspath(var.aws_ssh_private_key_path) + } + } +} + +// This default SSH user is used in the Ubuntu distro +provider "enos" "ubuntu" { + transport = { + ssh = { + user = "ubuntu" + private_key_path = abspath(var.aws_ssh_private_key_path) + } + } +} diff --git a/enos/enos-qualities.hcl b/enos/enos-qualities.hcl new file mode 100644 index 0000000..6ca45e9 --- /dev/null +++ b/enos/enos-qualities.hcl @@ -0,0 +1,220 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +quality "vault_api_sys_config_read" { + description = <<-EOF + The v1/sys/config/sanitized Vault API returns sanitized configuration which matches our given + configuration + EOF +} + +quality "vault_api_sys_ha_status_read" { + description = "The v1/sys/ha-status Vault API returns the HA status of the cluster" +} + +quality "vault_api_sys_health_read" { + description = <<-EOF + The v1/sys/health Vault API returns the correct codes depending on the replication and + 'seal-status' of the cluster + EOF +} + +quality "vault_api_sys_host_info_read" { + description = "The v1/sys/host-info Vault API returns the host info for each node in the cluster" +} + +quality "vault_api_sys_leader_read" { + description = "The v1/sys/leader Vault API returns the cluster leader info" +} + +quality "vault_api_sys_replication_status_read" { + description = <<-EOF + The v1/sys/replication/status Vault API returns the performance replication status of the + cluster + EOF +} + +quality "vault_api_sys_seal_status_api_read_matches_sys_health" { + description = <<-EOF + The v1/sys/seal-status Vault API and v1/sys/health Vault API agree on the health of each node + and the cluster + EOF +} + +quality "vault_api_sys_step_down_steps_down" { + 
description = <<-EOF + The v1/sys/step-down Vault API forces the cluster leader to step down and initiates a new leader + election + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_configuration_read" { + description = <<-EOF + The /sys/storage/raft/autopilot/configuration Vault API returns the autopilot configuration of + the cluster + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_state_read" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state of the + cluster + EOF +} + +quality "vault_api_sys_storage_raft_configuration_read" { + description = <<-EOF + The v1/sys/storage/raft/configuration Vault API returns the raft configuration of the cluster + EOF +} + +quality "vault_api_sys_version_history_keys" { + description = <<-EOF + The v1/sys/version-history Vault API returns the cluster version history and the 'keys' data + includes our target version + EOF +} + +quality "vault_api_sys_version_history_key_info" { + description = <<-EOF + The v1/sys/version-history Vault API returns the cluster version history and the + 'key_info["$expected_version"]' data is present for the expected version and the 'build_date' + matches the expected build_date. 
+ EOF +} + +quality "vault_artifact_bundle" { + description = "The candidate binary packaged as a zip bundle is used for testing" +} + +quality "vault_artifact_deb" { + description = "The candidate binary packaged as a deb package is used for testing" +} + +quality "vault_artifact_rpm" { + description = "The candidate binary packaged as an rpm package is used for testing" +} + +quality "vault_audit_log" { + description = "The Vault audit sub-system is enabled with the log and writes to a log" +} + +quality "vault_audit_log_secrets" { + description = "The Vault audit sub-system does not output secret values" +} + +quality "vault_audit_socket" { + description = "The Vault audit sub-system is enabled with the socket and writes to a socket" +} + +quality "vault_audit_syslog" { + description = "The Vault audit sub-system is enabled with the syslog and writes to syslog" +} + +quality "vault_autojoin_aws" { + description = "Vault auto-joins nodes using AWS tag discovery" +} + +quality "vault_cli_operator_members" { + description = "The 'vault operator members' command returns the expected list of members" +} + +quality "vault_cli_operator_step_down" { + description = "The 'vault operator step-down' command forces the cluster leader to step down" +} + +quality "vault_cli_status_exit_code" { + description = <<-EOF + The 'vault status' command exits with the correct code depending on expected seal status + EOF +} + +quality "vault_config_env_variables" { + description = "Vault starts when configured primarily with environment variables" +} + +quality "vault_config_file" { + description = "Vault starts when configured primarily with a configuration file" +} + +quality "vault_config_log_level" { + description = "The 'log_level' config stanza modifies its log level" +} + +quality "vault_init" { + description = "Vault initializes the cluster with the given seal parameters" +} + +quality "vault_journal_secrets" { + description = "The Vault systemd journal does not output secret 
values" +} + +quality "vault_license_required_ent" { + description = "Vault Enterprise requires a license in order to start" +} + +quality "vault_listener_ipv4" { + description = "Vault operates on ipv4 TCP listeners" +} + +quality "vault_listener_ipv6" { + description = "Vault operates on ipv6 TCP listeners" +} + +quality "vault_radar_index_create" { + description = "Vault radar is able to create an index from KVv2 mounts" +} + +quality "vault_radar_scan_file" { + description = "Vault radar is able to scan a file for secrets" +} + +quality "vault_raft_voters" { + description = global.description.verify_raft_cluster_all_nodes_are_voters +} + +quality "vault_seal_awskms" { + description = "Vault auto-unseals with the awskms seal" +} + +quality "vault_seal_shamir" { + description = <<-EOF + Vault manually unseals with the shamir seal when given the expected number of 'key_shares' + EOF +} + +quality "vault_seal_pkcs11" { + description = "Vault auto-unseals with the pkcs11 seal" +} + +quality "vault_service_start" { + description = "Vault starts with the configuration" +} + +quality "vault_service_systemd_notified" { + description = "The Vault binary notifies systemd when the service is active" +} + +quality "vault_service_systemd_unit" { + description = "The 'vault.service' systemd unit starts the service" +} + +quality "vault_storage_backend_raft" { + description = "Vault operates using integrated Raft storage" +} + +quality "vault_unseal_ha_leader_election" { + description = "Vault performs a leader election after it is unsealed" +} + +quality "vault_version_build_date" { + description = "Vault's reported build date matches our expectations" +} + +quality "vault_version_edition" { + description = "Vault's reported edition matches our expectations" +} + +quality "vault_version_release" { + description = "Vault's reported release version matches our expectations" +} diff --git a/enos/enos-scenario-openldap-leader-change.hcl 
b/enos/enos-scenario-openldap-leader-change.hcl new file mode 100644 index 0000000..320f7bd --- /dev/null +++ b/enos/enos-scenario-openldap-leader-change.hcl @@ -0,0 +1,838 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +scenario "openldap_leader_change" { + description = <<-EOF + The scenario verifies that the Vault OpenLDAP secrets engine plugin works correctly after a leader change. + + This scenario creates a Vault cluster with the OpenLDAP secrets engine plugin installed and configured, and starts an OpenLDAP server. + It then tests the plugin by creating static and dynamic roles, verifying that they can be created, read, updated, and deleted via the Vault API. + After that, it forces a Vault leader stepdown followed by a leader election and verifies that the plugin still works correctly after the leader change + + # How to run this scenario + + For general instructions on running a scenario, refer to the Enos docs: https://eng-handbook.hashicorp.services/internal-tools/enos/running-a-scenario/ + For troubleshooting tips and common errors, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/. 
+ + Variables required for all scenario variants: + - aws_ssh_private_key_path (more info about AWS SSH keypairs: https://eng-handbook.hashicorp.services/internal-tools/enos/getting-started/#set-your-aws-key-pair-name-and-private-key) + - aws_ssh_keypair_name + - vault_build_date* + - vault_product_version + - vault_revision* + - ldap_revision* + - plugin_name + - plugin_dir_vault + - ldap_bind_pass + - ldap_schema + - ldap_tag + - ldap_base_dn + - ldap_user_role_name + - ldap_username + - ldap_user_old_password + - ldap_dynamic_user_role_name + - ldap_dynamic_role_ldif_templates_path + - ldap_library_set_name + - ldap_service_account_names + + * If you don't already know what build date and revision you should be using, see + https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. + + Variables required for some scenario variants: + - artifactory_token (if using `artifact_source:artifactory` in your filter) + - aws_region (if different from the default value in enos-variables.hcl) + - distro_version_ (if different from the default version for your target + distro. 
See supported distros and default versions in the distro_version_ + definitions in enos-variables.hcl) + - vault_artifact_path (the path to where you have a Vault artifact already downloaded, + if using `artifact_source:crt` in your filter) + - vault_license_path (if using an ENT edition of Vault) + - ldap_plugin_version (if using `ldap_artifact_source:releases` or `ldap_artifact_source:artifactory` in your filter) + - ldap_artifactory_repo (if using `ldap_artifact_source:artifactory` in your filter) + - ldap_rotation_period (if using `ldap_config_root_rotation_method:period` in your filter) + - ldap_rotation_window (if using `ldap_config_root_rotation_method:schedule` in your filter) + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + ldap_artifact_source = global.ldap_artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + ldap_artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1403. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? 
abspath(var.ldap_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + } + + step "build_vault" { + description = global.description.build_vault + module = "build_vault_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + 
quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = null + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "bootstrap_vault_cluster_targets" { + description = global.description.bootstrap_vault_cluster_targets + module = module.bootstrap_vault_cluster_targets + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + unseal_keys = step.create_vault_cluster.unseal_keys_b64 + threshold = step.create_vault_cluster.unseal_threshold + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster, + step.bootstrap_vault_cluster_targets] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + 
providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? 
step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.verify_vault_unsealed, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "build_ldap" { + description = global.description.build_ldap + module = "build_ldap_${matrix.ldap_artifact_source}" + + variables { + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.ldap_artifact_source == "artifactory" ? var.plugin_artifactory_repo : null + artifactory_token = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.ldap_artifact_source == "artifactory" ? matrix.arch : null + artifact_type = matrix.ldap_artifact_source == "artifactory" ? "bundle" : null + product_version = var.ldap_plugin_version + revision = var.ldap_revision + plugin_name = var.plugin_name + makefile_dir = matrix.ldap_artifact_source == "local" ? var.makefile_dir : null + plugin_dest_dir = matrix.ldap_artifact_source == "local" ? 
var.plugin_dest_dir : null + } + } + + step "create_ldap_server_target" { + description = global.description.create_ldap_server_target + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider["ubuntu"] + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["24.04"] + cluster_tag_key = global.ldap_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.id + instance_count = 1 + } + } + + step "create_ldap_server" { + description = global.description.create_ldap_server + module = module.create_backend_server + depends_on = [step.create_ldap_server_target] + + providers = { + enos = local.enos_provider["ubuntu"] + } + + variables { + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + packages = concat(global.packages, global.distro_packages["ubuntu"]["24.04"], ["podman", "podman-docker"]) + ports = global.ports + } + } + + step "setup_plugin" { + description = global.description.setup_plugin + module = module.setup_plugin + depends_on = [ + step.get_vault_cluster_ips, + step.create_ldap_server, + step.verify_vault_unsealed, + step.build_ldap + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.ldap_artifact_source == "artifactory" ? step.build_ldap.ldap_artifactory_release : null + release = matrix.ldap_artifact_source == "releases" ? { version = var.ldap_plugin_version, edition = "ce" } : null + hosts = step.create_vault_cluster_targets.hosts + local_artifact_path = matrix.ldap_artifact_source == "local" ? 
local.ldap_artifact_path : null + + + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_name = var.plugin_name + plugin_dir_vault = var.plugin_dir_vault + plugin_mount_path = var.plugin_mount_path + } + } + + step "configure_plugin" { + description = global.description.configure_plugin + module = module.configure_plugin + depends_on = [step.setup_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_schema = var.ldap_schema + } + } + + step "test_static_role_crud_api" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = 
[step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "test_library_crud_api" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.configure_plugin, + step.test_static_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names + } + } + + step "verify_log_secrets" { + skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets + + description = global.description.verify_log_secrets + module = module.verify_log_secrets + depends_on = [ + step.verify_vault_unsealed, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api, + step.test_library_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_audit_log_secrets, + quality.vault_journal_secrets, + quality.vault_radar_index_create, + 
quality.vault_radar_scan_file, + ] + + variables { + audit_log_file_path = step.create_vault_cluster.audit_device_file_path + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + description = global.description.vault_leader_step_down + module = module.vault_step_down + depends_on = [ + step.get_vault_cluster_ips, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api, + step.test_library_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_step_down_steps_down, + quality.vault_cli_operator_step_down, + ] + + variables { + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader_after_step_down" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_step_down, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips_after_step_down" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = 
[step.wait_for_leader_after_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed_after_step_down" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_leader_after_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "test_static_role_crud_api_after_step_down" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.verify_vault_unsealed_after_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_step_down.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step 
"test_dynamic_role_crud_api_after_step_down" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = [step.verify_vault_unsealed_after_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_step_down.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "test_library_crud_api_after_step_down" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.verify_vault_unsealed_after_step_down, + step.test_static_role_crud_api_after_step_down + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_step_down.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + 
output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-openldap-restart.hcl b/enos/enos-scenario-openldap-restart.hcl new file mode 100644 index 0000000..ddc8733 --- /dev/null +++ b/enos/enos-scenario-openldap-restart.hcl @@ -0,0 +1,844 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +scenario "openldap_restart" { + description = <<-EOF + The scenario verifies that the Vault OpenLDAP secrets engine plugin works correctly after a restart of the Vault cluster. + + This scenario creates a Vault cluster with the OpenLDAP secrets engine plugin installed and configured, and starts an OpenLDAP server. 
+ It then tests the plugin by creating static and dynamic roles, verifying that they can be created, read, updated, and deleted via the Vault API. + After that, it restarts all Vault nodes and verifies that the plugin still works correctly after the restart. + + # How to run this scenario + + For general instructions on running a scenario, refer to the Enos docs: https://eng-handbook.hashicorp.services/internal-tools/enos/running-a-scenario/ + For troubleshooting tips and common errors, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/. + + Variables required for all scenario variants: + - aws_ssh_private_key_path (more info about AWS SSH keypairs: https://eng-handbook.hashicorp.services/internal-tools/enos/getting-started/#set-your-aws-key-pair-name-and-private-key) + - aws_ssh_keypair_name + - vault_build_date* + - vault_product_version + - vault_revision* + - ldap_revision* + - plugin_name + - plugin_dir_vault + - ldap_bind_pass + - ldap_schema + - ldap_tag + - ldap_base_dn + - ldap_user_role_name + - ldap_username + - ldap_user_old_password + - ldap_dynamic_user_role_name + - ldap_dynamic_role_ldif_templates_path + - ldap_library_set_name + - ldap_service_account_names + + * If you don't already know what build date and revision you should be using, see + https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. + + Variables required for some scenario variants: + - artifactory_token (if using `artifact_source:artifactory` in your filter) + - aws_region (if different from the default value in enos-variables.hcl) + - distro_version_ (if different from the default version for your target + distro. 
See supported distros and default versions in the distro_version_ + definitions in enos-variables.hcl) + - vault_artifact_path (the path to where you have a Vault artifact already downloaded, + if using `artifact_source:crt` in your filter) + - vault_license_path (if using an ENT edition of Vault) + - ldap_plugin_version (if using `ldap_artifact_source:releases` or `ldap_artifact_source:artifactory` in your filter) + - ldap_artifactory_repo (if using `ldap_artifact_source:artifactory` in your filter) + - ldap_rotation_period (if using `ldap_config_root_rotation_method:period` in your filter) + - ldap_rotation_window (if using `ldap_config_root_rotation_method:schedule` in your filter) + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + ldap_artifact_source = global.ldap_artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + ldap_artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1403. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? 
abspath(var.ldap_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + } + + step "build_vault" { + description = global.description.build_vault + module = "build_vault_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + 
quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = null + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "bootstrap_vault_cluster_targets" { + description = global.description.bootstrap_vault_cluster_targets + module = module.bootstrap_vault_cluster_targets + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + unseal_keys = step.create_vault_cluster.unseal_keys_b64 + threshold = step.create_vault_cluster.unseal_threshold + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster, + step.bootstrap_vault_cluster_targets] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + 
providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? 
step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.verify_vault_unsealed, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "build_ldap" { + description = global.description.build_ldap + module = "build_ldap_${matrix.ldap_artifact_source}" + + variables { + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.ldap_artifact_source == "artifactory" ? var.plugin_artifactory_repo : null + artifactory_token = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.ldap_artifact_source == "artifactory" ? matrix.arch : null + artifact_type = matrix.ldap_artifact_source == "artifactory" ? "bundle" : null + product_version = var.ldap_plugin_version + revision = var.ldap_revision + plugin_name = var.plugin_name + makefile_dir = matrix.ldap_artifact_source == "local" ? var.makefile_dir : null + plugin_dest_dir = matrix.ldap_artifact_source == "local" ? 
var.plugin_dest_dir : null + } + } + + step "create_ldap_server_target" { + description = global.description.create_ldap_server_target + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider["ubuntu"] + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["24.04"] + cluster_tag_key = global.ldap_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.id + instance_count = 1 + } + } + + step "create_ldap_server" { + description = global.description.create_ldap_server + module = module.create_backend_server + depends_on = [step.create_ldap_server_target] + + providers = { + enos = local.enos_provider["ubuntu"] + } + + variables { + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + packages = concat(global.packages, global.distro_packages["ubuntu"]["24.04"], ["podman", "podman-docker"]) + ports = global.ports + } + } + + step "setup_plugin" { + description = global.description.setup_plugin + module = module.setup_plugin + depends_on = [ + step.get_vault_cluster_ips, + step.create_ldap_server, + step.verify_vault_unsealed, + step.build_ldap + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.ldap_artifact_source == "artifactory" ? step.build_ldap.ldap_artifactory_release : null + release = matrix.ldap_artifact_source == "releases" ? { version = var.ldap_plugin_version, edition = "ce" } : null + hosts = step.create_vault_cluster_targets.hosts + local_artifact_path = matrix.ldap_artifact_source == "local" ? 
local.ldap_artifact_path : null + + + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_name = var.plugin_name + plugin_dir_vault = var.plugin_dir_vault + plugin_mount_path = var.plugin_mount_path + } + } + + step "configure_plugin" { + description = global.description.configure_plugin + module = module.configure_plugin + depends_on = [step.setup_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_schema = var.ldap_schema + } + } + + step "test_static_role_crud_api" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = 
[step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "test_library_crud_api" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.configure_plugin, + step.test_static_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names + } + } + + step "verify_log_secrets" { + skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets + + description = global.description.verify_log_secrets + module = module.verify_log_secrets + depends_on = [ + step.verify_vault_unsealed, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_audit_log_secrets, + quality.vault_journal_secrets, + quality.vault_radar_index_create, + quality.vault_radar_scan_file, + ] + + variables { + 
audit_log_file_path = step.create_vault_cluster.audit_device_file_path + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "restart_all_vault_nodes" { + description = global.description.restart_all_vault_nodes + module = module.restart_vault + depends_on = [ + step.get_vault_cluster_ips, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api, + step.test_library_crud_api, + step.verify_raft_auto_join_voter + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_sealed_after_restart" { + description = global.description.verify_vault_sealed + module = module.vault_wait_for_cluster_sealed + depends_on = [ + step.restart_all_vault_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "unseal_vault" { + description = global.description.unseal_vault + module = module.vault_unseal_replication_followers + depends_on = [step.verify_vault_sealed_after_restart] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_seal_type = matrix.seal + vault_unseal_keys = step.create_vault_cluster.unseal_keys_hex + } + } + + step "verify_vault_unsealed_after_restart" { + description = global.description.verify_vault_unsealed + module = 
module.vault_wait_for_cluster_unsealed + depends_on = [step.unseal_vault] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "get_vault_cluster_ips_after_restart" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.verify_vault_unsealed_after_restart] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "test_static_role_crud_api_after_restart" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.get_vault_cluster_ips_after_restart] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_restart.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = 
var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api_after_restart" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = [ + step.get_vault_cluster_ips_after_restart + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_restart.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "test_library_crud_api_after_restart" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.get_vault_cluster_ips_after_restart, + step.test_static_role_crud_api_after_restart + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_restart.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = 
step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} \ No newline at end of file diff --git a/enos/enos-scenario-openldap-smoke.hcl b/enos/enos-scenario-openldap-smoke.hcl new file mode 100644 index 0000000..c630249 --- /dev/null +++ b/enos/enos-scenario-openldap-smoke.hcl @@ -0,0 +1,694 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +scenario "openldap_smoke" { + description = <<-EOF + The scenario deploys a Vault cluster and a test OpenLDAP server to act as the LDAP backend for integration. 
+ It enables and configures the OpenLDAP secrets engine plugin in Vault, connecting it to the deployed LDAP server, then + performs plugin configuration and usage tests to verify correct integration and expected functionality of the secrets engine. + + This scenario validates that the Vault OpenLDAP secrets engine plugin works as expected after a fresh installation, + covering plugin setup, configuration, and end-to-end workflow testing of the static roles and dynamic roles API endpoints. + + # How to run this scenario + + For general instructions on running a scenario, refer to the Enos docs: https://eng-handbook.hashicorp.services/internal-tools/enos/running-a-scenario/ + For troubleshooting tips and common errors, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/. + + Variables required for all scenario variants: + - aws_ssh_private_key_path (more info about AWS SSH keypairs: https://eng-handbook.hashicorp.services/internal-tools/enos/getting-started/#set-your-aws-key-pair-name-and-private-key) + - aws_ssh_keypair_name + - vault_build_date* + - vault_product_version + - vault_revision* + - ldap_revision* + - plugin_name + - plugin_dir_vault + - ldap_bind_pass + - ldap_schema + - ldap_tag + - ldap_base_dn + - ldap_user_role_name + - ldap_username + - ldap_user_old_password + - ldap_dynamic_user_role_name + - ldap_dynamic_role_ldif_templates_path + - ldap_library_set_name + - ldap_service_account_names + + * If you don't already know what build date and revision you should be using, see + https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. + + Variables required for some scenario variants: + - artifactory_token (if using `artifact_source:artifactory` in your filter) + - aws_region (if different from the default value in enos-variables.hcl) + - distro_version_ (if different from the default version for your target + distro. 
See supported distros and default versions in the distro_version_ + definitions in enos-variables.hcl) + - vault_artifact_path (the path to where you have a Vault artifact already downloaded, + if using `artifact_source:crt` in your filter) + - vault_license_path (if using an ENT edition of Vault) + - ldap_plugin_version (if using `ldap_artifact_source:releases` or `ldap_artifact_source:artifactory` in your filter) + - plugin_artifactory_repo (if using `ldap_artifact_source:artifactory` in your filter) + - ldap_rotation_period (if using `ldap_config_root_rotation_method:period` in your filter) + - ldap_rotation_window (if using `ldap_config_root_rotation_method:schedule` in your filter) + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + ldap_artifact_source = global.ldap_artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + ldap_config_root_rotation_method = global.ldap_config_root_rotation_methods + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + ldap_artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1403. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // rotation manager capabilities not supported in Vault community edition + exclude { + edition = ["ce"] + ldap_config_root_rotation_method = ["period", "schedule"] + } + + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? 
abspath(var.ldap_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + } + + step "build_vault" { + description = global.description.build_vault + module = "build_vault_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + 
quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = null + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "bootstrap_vault_cluster_targets" { + description = global.description.bootstrap_vault_cluster_targets + module = module.bootstrap_vault_cluster_targets + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + unseal_keys = step.create_vault_cluster.unseal_keys_b64 + threshold = step.create_vault_cluster.unseal_threshold + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster, + step.bootstrap_vault_cluster_targets] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + 
providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? 
step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.verify_vault_unsealed, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "build_ldap" { + description = global.description.build_ldap + module = "build_ldap_${matrix.ldap_artifact_source}" + + variables { + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.ldap_artifact_source == "artifactory" ? var.plugin_artifactory_repo : null + artifactory_token = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.ldap_artifact_source == "artifactory" ? matrix.arch : null + artifact_type = matrix.ldap_artifact_source == "artifactory" ? "bundle" : null + product_version = var.ldap_plugin_version + revision = var.ldap_revision + plugin_name = var.plugin_name + makefile_dir = matrix.ldap_artifact_source == "local" ? var.makefile_dir : null + plugin_dest_dir = matrix.ldap_artifact_source == "local" ? 
var.plugin_dest_dir : null + } + } + + step "create_ldap_server_target" { + description = global.description.create_ldap_server_target + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider["ubuntu"] + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["24.04"] + cluster_tag_key = global.ldap_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.id + instance_count = 1 + } + } + + step "create_ldap_server" { + description = global.description.create_ldap_server + module = module.create_backend_server + depends_on = [step.create_ldap_server_target] + + providers = { + enos = local.enos_provider["ubuntu"] + } + + variables { + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + packages = concat(global.packages, global.distro_packages["ubuntu"]["24.04"], ["podman", "podman-docker"]) + ports = global.ports + } + } + + step "setup_plugin" { + description = global.description.setup_plugin + module = module.setup_plugin + depends_on = [ + step.get_vault_cluster_ips, + step.create_ldap_server, + step.verify_vault_unsealed, + step.build_ldap + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.ldap_artifact_source == "artifactory" ? step.build_ldap.ldap_artifactory_release : null + release = matrix.ldap_artifact_source == "releases" ? { version = var.ldap_plugin_version, edition = "ce" } : null + hosts = step.create_vault_cluster_targets.hosts + local_artifact_path = matrix.ldap_artifact_source == "local" ? 
local.ldap_artifact_path : null + + + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_name = var.plugin_name + plugin_dir_vault = var.plugin_dir_vault + plugin_mount_path = var.plugin_mount_path + } + } + + step "configure_plugin" { + description = global.description.configure_plugin + module = module.configure_plugin + depends_on = [step.setup_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_schema = var.ldap_schema + } + } + + step "test_ldap_config_root_rotation" { + description = global.description.ldap_config_root_rotation + module = "root_rotation_${matrix.ldap_config_root_rotation_method}" + depends_on = [step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + + rotation_period = matrix.ldap_config_root_rotation_method == "period" ? var.ldap_rotation_period : null + rotation_window = matrix.ldap_config_root_rotation_method == "schedule" ? 
var.ldap_rotation_window : null + } + } + + step "test_static_role_crud_api" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.test_ldap_config_root_rotation] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = [step.test_ldap_config_root_rotation] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "test_library_crud_api" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.test_ldap_config_root_rotation, + step.test_static_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { 
+ vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names + } + } + + step "verify_log_secrets" { + skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets + + description = global.description.verify_log_secrets + module = module.verify_log_secrets + depends_on = [ + step.verify_vault_unsealed, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api, + step.test_library_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_audit_log_secrets, + quality.vault_journal_secrets, + quality.vault_radar_index_create, + quality.vault_radar_scan_file, + ] + + variables { + audit_log_file_path = step.create_vault_cluster.audit_device_file_path + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output 
"root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl new file mode 100644 index 0000000..085c1de --- /dev/null +++ b/enos/enos-terraform.hcl @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +terraform_cli "default" { + plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null +} + +terraform_cli "dev" { + plugin_cache_dir = var.terraform_plugin_cache_dir != null ? 
abspath(var.terraform_plugin_cache_dir) : null + + provider_installation { + dev_overrides = { + "registry.terraform.io/hashicorp-forge/enos" = try(abspath("../../terraform-provider-enos/dist"), null) + } + direct {} + } +} + +terraform "default" { + required_version = ">= 1.7.0" + + required_providers { + aws = { + source = "hashicorp/aws" + } + + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl new file mode 100644 index 0000000..3b0e2b7 --- /dev/null +++ b/enos/enos-variables.hcl @@ -0,0 +1,325 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "artifactory_token" { + type = string + description = "The token to use when authenticating to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-crt-stable-local*" +} + +variable "aws_region" { + description = "The AWS region where we'll create infrastructure" + type = string + default = "us-east-1" +} + +variable "aws_ssh_keypair_name" { + description = "The AWS keypair to use for SSH" + type = string + default = "enos-ci-ssh-key" +} + +variable "aws_ssh_private_key_path" { + description = "The path to the AWS keypair private key" + type = string + default = "./support/private_key.pem" +} + +variable "distro_version_amzn" { + description = "The version of Amazon Linux 2 to use" + type = string + default = "2023" // or "2", though pkcs11 has not been tested with 2 +} + +variable "distro_version_leap" { + description = "The version of openSUSE leap to use" + type = string + default = "15.6" +} + +variable "distro_version_rhel" { + description = "The 
version of RHEL to use" + type = string + default = "9.5" // or "8.10" +} + +variable "distro_version_sles" { + description = "The version of SUSE SLES to use" + type = string + default = "15.6" +} + +variable "distro_version_ubuntu" { + description = "The version of ubuntu to use" + type = string + default = "24.04" // or "20.04", "22.04" +} + +variable "ldap_dynamic_role_ldif_templates_path" { + description = "LDIF templates path for dynamic role CRUD API tests" + default = "/tmp" +} + +variable "ldap_artifact_path" { + description = "Path to CRT generated or local vault-plugin-secrets-openldap.zip bundle" + type = string + default = "/tmp/vault-plugin-secrets-openldap.zip" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix" + default = "dc=example,dc=com" +} + +variable "ldap_bind_pass" { + description = "LDAP bind password" + type = string + default = null +} + +variable "ldap_disable_automated_rotation" { + type = bool + default = false + description = "Enterprise: cancel upcoming rotations until unset" +} + +variable "ldap_dynamic_user_role_name" { + description = "The name of the LDAP dynamic user role to create" + type = string + default = "adam" +} + +variable "ldap_plugin_version" { + description = "LDAP plugin version to use" + type = string + default = null +} + +variable "ldap_revision" { + description = "The git sha of LDAP plugin artifact we are testing" + type = string + default = null +} + +variable "ldap_rotation_period" { + type = number + default = 0 + description = "Enterprise: time in seconds before rotating the LDAP secret engine root credential. 
0 disables rotation" +} + +variable "ldap_rotation_window" { + type = number + default = 0 + description = "Enterprise: max time in seconds to complete scheduled rotation" +} + +variable "ldap_schema" { + description = "LDAP schema type" + type = string + default = "openldap" +} + +variable "ldap_tag" { + description = "LDAP image tag version" + type = string + default = "1.3.0" +} + +variable "ldap_username" { + description = "The username of the LDAP user to create" + type = string + default = "mary.smith" +} + +variable "ldap_user_old_password" { + description = "The old password of the LDAP user to create" + type = string + default = "defaultpassword" +} + +variable "ldap_user_role_name" { + description = "The name of the LDAP user role to create" + type = string + default = "mary" +} + +variable "ldap_library_set_name" { + description = "The name of the library set to use for library CRUD API tests" + type = string + default = "dev-team" +} + +variable "makefile_dir" { + description = "Directory containing the Makefile for plugin build" + type = string + default = null +} + +variable "plugin_artifactory_repo" { + type = string + description = "The artifactory repo to search for vault plugin artifacts" + default = "hashicorp-vault-ecosystem-staging-local" +} + +variable "plugin_dest_dir" { + description = "Destination directory for the plugin binary" + type = string + default = null +} + +variable "plugin_dir_vault" { + description = "Vault server plugin directory" + type = string + default = "/etc/vault/plugins" +} + +variable "plugin_mount_path" { + description = "Mount path for the plugin in Vault" + type = string + default = null +} + +variable "plugin_name" { + description = "Name of the Vault plugin to use" + type = string + default = null +} + +variable "project_name" { + description = "The description of the project" + type = string + default = "vault-plugin-secrets-openldap-enos-integration" +} + +variable "ldap_service_account_names" { + description = 
"List of service account names to create for library CRUD API tests" + type = list(string) + default = ["staticuser", "bob.johnson", "mary.smith"] +} + +variable "tags" { + description = "Tags that will be applied to infrastructure resources that support tagging" + type = map(string) + default = null +} + +variable "terraform_plugin_cache_dir" { + description = "The directory to cache Terraform modules and providers" + type = string + default = null +} + +variable "ui_test_filter" { + type = string + description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"\"'" + default = null +} + +variable "ui_run_tests" { + type = bool + description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" + default = true +} + +variable "vault_artifact_type" { + description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or .rpm package and 'bundle' for .zip bundles" + default = "bundle" +} + +variable "vault_artifact_path" { + description = "Path to CRT generated or local vault.zip bundle" + type = string + default = "/tmp/vault.zip" +} + +variable "vault_build_date" { + description = "The build date for Vault artifact" + type = string + default = "" +} + +variable "vault_enable_audit_devices" { + description = "If true every audit device will be enabled" + type = bool + default = true +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "vault_instance_count" { + description = "How many instances to create for the Vault cluster" + type = number + default = 3 +} + +variable "vault_license_path" { + description = "The path to a valid Vault enterprise edition license. 
This is only required for non-ce editions" + type = string + default = null +} + +variable "vault_local_build_tags" { + description = "The build tags to pass to the Go compiler for builder:local variants" + type = list(string) + default = null +} + +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." + type = string + default = "trace" +} + +variable "vault_product_version" { + description = "The version of Vault we are testing" + type = string + default = null +} + +variable "vault_radar_license_path" { + description = "The license for vault-radar which is used to verify the audit log" + type = string + default = null +} + +variable "vault_repo_ref" { + description = "The Git ref to use for external modules; can be pinned to a specific SHA" + type = string + default = "main" +} + +variable "vault_revision" { + description = "The git sha of Vault artifact we are testing" + type = string + default = null +} + +variable "verify_aws_secrets_engine" { + description = "If true we'll verify AWS secrets engines behavior. Because of user creation restrictions in Doormat AWS accounts, only turn this on for CI, as it depends on resources that exist only in those accounts" + type = bool + default = false +} + +variable "verify_log_secrets" { + description = "If true and var.vault_enable_audit_devices is true we'll verify that the audit log does not contain unencrypted secrets. Requires var.vault_radar_license_path to be set to a valid license file." + type = bool + default = false +} \ No newline at end of file diff --git a/enos/modules/backend_servers_setup/main.tf b/enos/modules/backend_servers_setup/main.tf new file mode 100644 index 0000000..eec62db --- /dev/null +++ b/enos/modules/backend_servers_setup/main.tf @@ -0,0 +1,67 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + ldap_server = { + domain = "example.com" + org = "example" + admin_pw = "adminpassword" + tag = var.ldap_tag + port = var.ports.ldap.port + secure_port = var.ports.ldaps.port + ip_address = var.hosts[0].public_ip + private_ip = var.hosts[0].private_ip + } + ldif_path = "/tmp/seed.ldif" +} + +# Step 1: We run install_packages +module "install_packages" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/install_packages" + hosts = var.hosts + packages = var.packages +} + +# Step 2: Copy LDIF file for seeding LDAP +resource "enos_file" "seed_ldif" { + depends_on = [module.install_packages] + + source = abspath("${path.module}/../../../bootstrap/ldif/seed.ldif") + destination = local.ldif_path + + transport = { + ssh = { + host = local.ldap_server.ip_address + } + } +} + +# Step 3: Start OpenLDAP Docker container and seed data +resource "enos_remote_exec" "setup_openldap" { + depends_on = [enos_file.seed_ldif] + + environment = { + LDAP_DOMAIN = local.ldap_server.domain + LDAP_ORG = local.ldap_server.org + LDAP_ADMIN_PW = local.ldap_server.admin_pw + IMAGE_TAG = local.ldap_server.tag + LDAP_PORT = local.ldap_server.port + LDIF_PATH = local.ldif_path + } + + scripts = [abspath("${path.module}/../../../bootstrap/setup-openldap.sh")] + + transport = { + ssh = { + host = local.ldap_server.ip_address + } + } +} \ No newline at end of file diff --git a/enos/modules/backend_servers_setup/outputs.tf b/enos/modules/backend_servers_setup/outputs.tf new file mode 100644 index 0000000..7b9c4ef --- /dev/null +++ b/enos/modules/backend_servers_setup/outputs.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +output "state" { + value = { + ldap = local.ldap_server + } +} + +output "ldap_ip_address" { + value = local.ldap_server.private_ip +} + +output "ldap_port" { + value = local.ldap_server.port +} \ No newline at end of file diff --git a/enos/modules/backend_servers_setup/variables.tf b/enos/modules/backend_servers_setup/variables.tf new file mode 100644 index 0000000..af1c2e3 --- /dev/null +++ b/enos/modules/backend_servers_setup/variables.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "ldap_tag" { + type = string + description = "OpenLDAP Server Version to use" + default = "1.5.0" +} + +variable "ports" { + description = "Port configuration for services" + type = map(object({ + port = string + description = string + })) +} + +variable "packages" { + type = list(string) + description = "A list of packages to install via the target host package manager" + default = [] +} + +variable "vault_repo_ref" { + type = string + description = "The reference to use for the Vault repository" + default = "main" +} \ No newline at end of file diff --git a/enos/modules/build_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf new file mode 100644 index 0000000..9c73d44 --- /dev/null +++ b/enos/modules/build_artifactory_artifact/main.tf @@ -0,0 +1,109 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.6.1" + } + } +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-vault-ecosystem-staging-local" +} + +variable "product_name" { + type = string + description = "The name of the product for which the plugin is built" + default = "vault-plugin-secrets-openldap" +} + +variable "plugin_name" { + type = string + description = "Name of the plugin" +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "arch" {} +variable "artifact_type" {} +variable "artifact_path" { default = null } +variable "revision" {} +variable "product_version" {} +variable "bundle_path" { default = null } +variable "plugin_dest_dir" { default = null } +variable "makefile_dir" { default = null } + +locals { + // Compose zip filename: plugin_name_version_goos_goarch.zip + artifact_name = "${var.plugin_name}_${var.product_version}_${var.goos}_${var.goarch}.zip" +} + +data "enos_artifactory_item" "ldap" { + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.artifactory_repo + path = "${var.product_name}/*" + properties = tomap({ + "commit" = var.revision, + "product-name" = var.product_name, + "product-version" = var.product_version, + }) +} + +output "url" { + value = 
data.enos_artifactory_item.ldap.results[0].url + description = "Artifactory download URL for the LDAP plugin zip" +} + +output "sha256" { + value = data.enos_artifactory_item.ldap.results[0].sha256 + description = "SHA256 checksum of the LDAP plugin zip" +} + +output "size" { + value = data.enos_artifactory_item.ldap.results[0].size + description = "Size in bytes of the LDAP plugin zip" +} + +output "name" { + value = data.enos_artifactory_item.ldap.results[0].name + description = "Name of the LDAP plugin artifact" +} + +output "ldap_artifactory_release" { + value = { + url = data.enos_artifactory_item.ldap.results[0].url + sha256 = data.enos_artifactory_item.ldap.results[0].sha256 + token = var.artifactory_token + username = null + } +} \ No newline at end of file diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf new file mode 100644 index 0000000..f7b983c --- /dev/null +++ b/enos/modules/build_local/main.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "plugin_dest_dir" { + description = "Where to create the zip bundle of the Plugin build" +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "plugin_name" { + type = string + description = "Name of the plugin" +} + +variable "makefile_dir" { + type = string + description = "Plugin Project Makefile directory" + default = "$(PWD)" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { default = null } +variable "artifact_type" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } + +resource "enos_local_exec" "build" { + scripts = ["${path.module}/scripts/plugin-build.sh"] + + environment = { + PLUGIN_NAME = var.plugin_name + PLUGIN_DIR = var.plugin_dest_dir + MAKEFILE_DIR = var.makefile_dir + GOARCH = var.goarch + GOOS = var.goos + } + +} diff --git a/enos/modules/build_local/scripts/plugin-build.sh b/enos/modules/build_local/scripts/plugin-build.sh new file mode 100755 index 0000000..5d7a027 --- /dev/null +++ b/enos/modules/build_local/scripts/plugin-build.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +# Expect these environment variables: +# PLUGIN_NAME +# PLUGIN_DIR + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" +[[ -z "$PLUGIN_DIR" ]] && fail "PLUGIN_DIR env variable has not been set" + +echo "[build] PLUGIN_NAME=${PLUGIN_NAME:-}" +echo "[build] PLUGIN_DIR=${PLUGIN_DIR:-}" + +# Remove from project .bin directory if it exists +PROJECT_BIN_DIR="${MAKEFILE_DIR}/bin" +if [ -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[build] Removing existing plugin at ${PROJECT_BIN_DIR}/${PLUGIN_NAME}" + rm -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" +fi + +# Ensure destination directory exists +mkdir -p "${PLUGIN_DIR}" + +# Remove existing plugin (if present) before copying new one +if [ -f "${PLUGIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[build] Removing existing plugin at ${PLUGIN_DIR}/${PLUGIN_NAME}" + rm -f "${PLUGIN_DIR}/${PLUGIN_NAME}" +fi + +# Build plugin +pushd "${MAKEFILE_DIR}" >/dev/null + GOOS="${GOOS:-$(go env GOOS)}" + GOARCH="${GOARCH:-$(go env GOARCH)}" + echo "[build] GOOS=${GOOS} GOARCH=${GOARCH}" + GOOS="${GOOS}" GOARCH="${GOARCH}" make dev +popd >/dev/null + +# Copy and set executable bit +cp "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" "${PLUGIN_DIR}/${PLUGIN_NAME}" +chmod +x "${PLUGIN_DIR}/${PLUGIN_NAME}" + +# Zip up the plugin binary into a bundle +ZIP_FILE="${PLUGIN_DIR}/${PLUGIN_NAME}.zip" +pushd "${PLUGIN_DIR}" >/dev/null + zip -j "${ZIP_FILE}" "${PLUGIN_NAME}" +popd >/dev/null + +echo "[build] Plugin built and zipped at ${ZIP_FILE}" \ No newline at end of file diff --git a/enos/modules/build_releases/main.tf b/enos/modules/build_releases/main.tf new file mode 100644 index 0000000..bb51ef6 --- /dev/null +++ b/enos/modules/build_releases/main.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +# Shim module since Releases provided things will use the ldap_release variable +variable "bundle_path" { + default = "/tmp/vault.zip" +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { default = null } +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "revision" { default = null } +variable "makefile_dir" { default = null } +variable "plugin_name" { default = null } +variable "product_version" { default = null } +variable "plugin_dest_dir" { default = null } diff --git a/enos/modules/configure_plugin/ldap/main.tf b/enos/modules/configure_plugin/ldap/main.tf new file mode 100644 index 0000000..0cf87d9 --- /dev/null +++ b/enos/modules/configure_plugin/ldap/main.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + admin_dn = "cn=admin,${var.ldap_base_dn}" + ldap_url = "ldap://${var.ldap_host}:${var.ldap_port}" + users_dn = "ou=users,${var.ldap_base_dn}" +} + +# Configure the plugin +resource "enos_remote_exec" "plugin_configure" { + scripts = [abspath("${path.module}/scripts/plugin-configure.sh")] + environment = { + PLUGIN_PATH = var.plugin_mount_path + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + LDAP_URL = local.ldap_url + LDAP_BIND_DN = local.admin_dn + LDAP_BIND_PASS = var.ldap_bind_pass + LDAP_USER_DN = local.users_dn + LDAP_SCHEMA = var.ldap_schema + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} \ No newline at end of file diff --git a/enos/modules/configure_plugin/ldap/scripts/plugin-configure.sh b/enos/modules/configure_plugin/ldap/scripts/plugin-configure.sh new file mode 100755 index 0000000..3275f64 --- /dev/null +++ b/enos/modules/configure_plugin/ldap/scripts/plugin-configure.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +# Required ENV vars: +# PLUGIN_PATH - Mount path for plugin (e.g., 'local-secrets-ldap') +# LDAP_URL - LDAP server URL (e.g., ldap://127.0.0.1:389) +# LDAP_BIND_DN - LDAP bind DN (e.g., cn=admin,dc=example,dc=com) +# LDAP_BIND_PASS - LDAP bind password +# LDAP_USER_DN - LDAP user DN base (e.g., ou=users,dc=example,dc=com) +# LDAP_SCHEMA - LDAP schema type (e.g., openldap) + +export VAULT_ADDR +export VAULT_TOKEN + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" +[[ -z "$LDAP_URL" ]] && fail "LDAP_URL env variable has not been set" +[[ -z "$LDAP_BIND_DN" ]] && fail "LDAP_BIND_DN env variable has not been set" +[[ -z "$LDAP_BIND_PASS" ]] && fail "LDAP_BIND_PASS env variable has not been set" +[[ -z "$LDAP_USER_DN" ]] && fail "LDAP_USER_DN env variable has not been set" +[[ -z "$LDAP_SCHEMA" ]] && fail "LDAP_SCHEMA env variable has not been set" + +echo "[configure] Configuring plugin at $PLUGIN_PATH" + +vault write "${PLUGIN_PATH}/config" \ + url="${LDAP_URL}" \ + binddn="${LDAP_BIND_DN}" \ + bindpass="${LDAP_BIND_PASS}" \ + userdn="${LDAP_USER_DN}" \ + schema="${LDAP_SCHEMA}" + +echo "[configure] Current plugin config:" +vault read "${PLUGIN_PATH}/config" \ No newline at end of file diff --git a/enos/modules/configure_plugin/ldap/variables.tf b/enos/modules/configure_plugin/ldap/variables.tf new file mode 100644 index 0000000..a041113 --- /dev/null +++ b/enos/modules/configure_plugin/ldap/variables.tf @@ -0,0 +1,47 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +variable "ldap_host" { + type = string + description = "The LDAP server host" +} + +variable "ldap_port" { + type = string + description = "The LDAP server port" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix" +} + +variable "ldap_bind_pass" { + type = string + description = "LDAP bind password" +} + +variable "ldap_schema" { + type = string + description = "LDAP schema type" +} diff --git a/enos/modules/dynamic_role_crud_api/ldif/creation.ldif b/enos/modules/dynamic_role_crud_api/ldif/creation.ldif new file mode 100644 index 0000000..981e479 --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/ldif/creation.ldif @@ -0,0 +1,7 @@ +dn: uid={{.Username}},ou=users,dc=example,dc=com +objectClass: inetOrgPerson +uid: {{.Username}} +cn: {{.Username}} +sn: {{.Password | utf16le | base64}} +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: {{.Password}} \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/ldif/deletion.ldif b/enos/modules/dynamic_role_crud_api/ldif/deletion.ldif new file mode 100644 index 0000000..3b1551e --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/ldif/deletion.ldif @@ -0,0 +1,2 @@ +dn: uid={{.Username}},ou=users,dc=example,dc=com +changetype: delete \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/ldif/rollback.ldif b/enos/modules/dynamic_role_crud_api/ldif/rollback.ldif new file mode 100644 index 0000000..3b1551e --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/ldif/rollback.ldif @@ -0,0 +1,2 @@ +dn: 
uid={{.Username}},ou=users,dc=example,dc=com +changetype: delete \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/main.tf b/enos/modules/dynamic_role_crud_api/main.tf new file mode 100644 index 0000000..06ea329 --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/main.tf @@ -0,0 +1,66 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + ldif_files = fileset("${path.module}/ldif", "*") + file_host_pairs = flatten([ + for i in range(length(var.hosts)) : [ + for file in local.ldif_files : { + host_index = i + public_ip = var.hosts[i].public_ip + file = file + } + ] + ]) + file_host_map = { + for item in local.file_host_pairs : + "${item["host_index"]}_${item["file"]}" => item + } + users_dn = "ou=users,${var.ldap_base_dn}" + ldap_user_dn_tpl = "uid={{username}},${local.users_dn}" +} + +# Copy LDIF files to the hosts +resource "enos_file" "ldif_files" { + for_each = local.file_host_map + source = abspath("${path.module}/ldif/${each.value["file"]}") + destination = "${var.dynamic_role_ldif_templates_path}/${each.value["file"]}" + transport = { + ssh = { + host = each.value["public_ip"] + } + } +} + +# Execute the dynamic role CRUD API test script on the Vault leader +resource "enos_remote_exec" "dynamic_role_crud_api_test" { + depends_on = [enos_file.ldif_files] + scripts = ["${path.module}/scripts/dynamic-role.sh"] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + LDAP_HOST = var.ldap_host + LDAP_PORT = var.ldap_port + + ROLE_NAME = var.ldap_dynamic_user_role_name + LDAP_USER_DN_TPL = local.ldap_user_dn_tpl + LDIF_PATH = var.dynamic_role_ldif_templates_path + } + + transport = { + ssh = { + host = var.vault_leader_ip + } + } + +} \ No newline at end of file diff --git 
a/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh b/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh new file mode 100755 index 0000000..fa6d58d --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -e + +# Test Vault LDAP Dynamic Role CRUD and credential lifecycle using provided LDIFs. +# Assumptions: +# - You have uploaded creation.ldif, deletion.ldif, and rollback.ldif to the server. +# - Vault CLI is authenticated and VAULT_ADDR and VAULT_TOKEN are set. +# - Required ENV vars: +# PLUGIN_PATH (e.g., local-secrets-ldap) +# ROLE_NAME (e.g., adam) +# LDAP_HOST +# LDAP_PORT +# LDAP_USER_DN_TPL (e.g., uid={{username}},ou=users,dc=example,dc=com) +# LDIF_PATH (path to directory containing creation.ldif, deletion.ldif, rollback.ldif) + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$ROLE_NAME" ]] && fail "ROLE_NAME env variable has not been set" +[[ -z "$LDAP_HOST" ]] && fail "LDAP_HOST env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDAP_USER_DN_TPL" ]] && fail "LDAP_USER_DN_TPL env variable has not been set" +[[ -z "$LDIF_PATH" ]] && fail "LDIF_PATH env variable has not been set" + +export VAULT_ADDR +export VAULT_TOKEN + +ROLE_PATH="${PLUGIN_PATH}/role/${ROLE_NAME}" + +echo "==> Creating dynamic role: ${ROLE_NAME}" +vault write "${ROLE_PATH}" \ + creation_ldif=@"${LDIF_PATH}/creation.ldif" \ + deletion_ldif=@"${LDIF_PATH}/deletion.ldif" \ + rollback_ldif=@"${LDIF_PATH}/rollback.ldif" \ + default_ttl="2m" \ + max_ttl="10m" + +echo "==> Reading dynamic role" +vault read "${ROLE_PATH}" + +echo "==> Listing dynamic roles" +vault list "${PLUGIN_PATH}/role" + +echo "==> Requesting dynamic credentials" +CRED_PATH="${PLUGIN_PATH}/creds/${ROLE_NAME}" +if ! 
DYNAMIC_CREDS=$(vault read -format=json "${CRED_PATH}"); then + fail "Vault read failed when requesting dynamic credentials from ${CRED_PATH}" +fi +DYN_USERNAME=$(echo "${DYNAMIC_CREDS}" | jq -r .data.username) +DYN_PASSWORD=$(echo "${DYNAMIC_CREDS}" | jq -r .data.password) +LEASE_ID=$(echo "${DYNAMIC_CREDS}" | jq -r .lease_id) +if [[ -z "${DYN_USERNAME}" || -z "${DYN_PASSWORD}" || \ +-z "${LEASE_ID}" || "${DYN_USERNAME}" == "null" || \ +"${DYN_PASSWORD}" == "null" || "${LEASE_ID}" == "null" ]]; then + fail "Invalid dynamic credentials returned: ${DYNAMIC_CREDS}" +fi +echo "==> Got dynamic username: ${DYN_USERNAME}" +echo "==> Got dynamic password: ${DYN_PASSWORD}" +echo "==> Lease ID: ${LEASE_ID}" + +# Build the DN for the dynamic user +DYN_DN=${LDAP_USER_DN_TPL/\{\{username\}\}/$DYN_USERNAME} + +echo "==> Verifying login with dynamic credentials" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${DYN_PASSWORD}" -D "${DYN_DN}"; then + echo "[OK] Dynamic user login succeeded." +else + echo "[ERROR] Dynamic user login failed!" + exit 1 +fi + +echo "==> Revoking dynamic credentials (deletes LDAP user)" +vault lease revoke "${LEASE_ID}" + +sleep 2 + +echo "==> Verifying dynamic user is deleted" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${DYN_PASSWORD}" -D "${DYN_DN}"; then + echo "[ERROR] Dynamic user still exists after lease revoke!" + exit 1 +else + echo "[OK] Dynamic user deleted as expected." +fi + +echo "==> Deleting dynamic role" +vault delete "${ROLE_PATH}" + +echo "==> Confirming dynamic role deletion" +if vault read "${ROLE_PATH}"; then + echo "[ERROR] Dynamic role still exists after deletion!" + exit 1 +else + echo "[OK] Dynamic role deleted successfully." 
+fi + +echo "==> Dynamic role CRUD and credential lifecycle test: SUCCESS" \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/variables.tf b/enos/modules/dynamic_role_crud_api/variables.tf new file mode 100644 index 0000000..a9a0456 --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/variables.tf @@ -0,0 +1,53 @@ +variable "dynamic_role_ldif_templates_path" { + type = string + description = "LDIF files path" +} + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix" +} + +variable "ldap_host" { + type = string + description = "LDAP IP or hostname" +} + +variable "ldap_port" { + type = string + description = "LDAP port" +} + +variable "ldap_dynamic_user_role_name" { + type = string + description = "LDAP role name to be created" +} + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} \ No newline at end of file diff --git a/enos/modules/ec2_bootstrap_tools/main.tf b/enos/modules/ec2_bootstrap_tools/main.tf new file mode 100644 index 0000000..eb6e6c4 --- /dev/null +++ b/enos/modules/ec2_bootstrap_tools/main.tf @@ -0,0 +1,83 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +# Ensure the Vault plugin directory exists +resource "enos_remote_exec" "create_plugin_directory" { + for_each = var.hosts + + environment = { + PLUGIN_DIR = var.plugin_dir_vault + } + + scripts = [abspath("${path.module}/scripts/create-plugin-dir.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +#TODO: In the future, we should use the plugin_directory attribute in enos_vault_start resource when supported. + +# Add plugin directory to the config file +resource "enos_remote_exec" "add_plugin_directory_to_config" { + depends_on = [enos_remote_exec.create_plugin_directory] + for_each = var.hosts + + inline = [ + "echo \"plugin_directory = \\\"${var.plugin_dir_vault}\\\"\" | sudo tee -a /etc/vault.d/vault.hcl" + ] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Restart Vault service on all hosts +resource "enos_remote_exec" "restart_vault" { + depends_on = [enos_remote_exec.add_plugin_directory_to_config] + + for_each = var.hosts + + inline = [ + "sudo systemctl restart vault" + ] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Unseal Vault +resource "enos_remote_exec" "unseal_vault" { + depends_on = [enos_remote_exec.restart_vault] + + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/vault-unseal.sh")] + + environment = { + VAULT_ADDR = var.vault_addr + UNSEAL_KEYS = join(",", var.unseal_keys) + THRESHOLD = tostring(var.threshold) + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} \ No newline at end of file diff --git a/enos/modules/ec2_bootstrap_tools/scripts/create-plugin-dir.sh b/enos/modules/ec2_bootstrap_tools/scripts/create-plugin-dir.sh new file mode 100644 index 0000000..9c591cd --- /dev/null +++ b/enos/modules/ec2_bootstrap_tools/scripts/create-plugin-dir.sh @@ -0,0 +1,10 @@ 
+#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +PLUGIN_DIR="${PLUGIN_DIR:-/etc/vault/plugins}" + +sudo mkdir -p "$PLUGIN_DIR" +sudo chown vault:vault "$PLUGIN_DIR" \ No newline at end of file diff --git a/enos/modules/ec2_bootstrap_tools/scripts/vault-unseal.sh b/enos/modules/ec2_bootstrap_tools/scripts/vault-unseal.sh new file mode 100644 index 0000000..54ba3cd --- /dev/null +++ b/enos/modules/ec2_bootstrap_tools/scripts/vault-unseal.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +if [[ -z "$VAULT_ADDR" || -z "$UNSEAL_KEYS" || -z "$THRESHOLD" ]]; then + echo "Usage: $0 " + exit 1 +fi + +IFS=',' read -ra KEYS <<< "$UNSEAL_KEYS" + +export VAULT_ADDR + +for ((i=0; i&2 + exit 1 +} + +# Required environment variables +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$LDAP_HOST" ]] && fail "LDAP_HOST env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDAP_BASE_DN" ]] && fail "LDAP_BASE_DN env variable has not been set" +[[ -z "$LIBRARY_SET_NAME" ]] && fail "LIBRARY_SET_NAME env variable has not been set" +[[ -z "$SERVICE_ACCOUNT_NAMES" ]] && fail "SERVICE_ACCOUNT_NAMES env variable has not been set" + +export VAULT_ADDR +export VAULT_TOKEN + +LIB_PATH="${PLUGIN_PATH}/library/${LIBRARY_SET_NAME}" +STATUS_PATH="${LIB_PATH}/status" +CHECKOUT_PATH="${LIB_PATH}/check-out" +CHECKIN_PATH="${LIB_PATH}/check-in" +MANAGE_CHECKIN_PATH="${PLUGIN_PATH}/library/manage/${LIBRARY_SET_NAME}/check-in" + +# Verify SERVICE_ACCOUNT_NAMES parsing +IFS=',' read -r -a SA_LIST <<< "$SERVICE_ACCOUNT_NAMES" +if [[ ${#SA_LIST[@]} -lt 1 ]]; then + fail "SERVICE_ACCOUNT_NAMES must contain at least one account" +fi + +# Create library 
set +echo "==> Creating library set ${LIBRARY_SET_NAME}" +vault write "${LIB_PATH}" \ + service_account_names="${SERVICE_ACCOUNT_NAMES}" \ + ttl="1h" \ + max_ttl="2h" \ + disable_check_in_enforcement=false + +# Read library set +echo "==> Reading library set" +vault read "${LIB_PATH}" + +# List all library sets and verify ours is present +echo "==> Verifying library set appears in list" +LIST_OUTPUT=$(vault list "${PLUGIN_PATH}/library" 2>/dev/null) +echo "$LIST_OUTPUT" | grep -x "${LIBRARY_SET_NAME}" >/dev/null || fail "Library set '${LIBRARY_SET_NAME}' not found in list" + +# Check status +echo "==> Checking library set status" +vault read "${STATUS_PATH}" + +# Check out a service account +echo "==> Checking out a service account" +CRED_JSON=$(vault write -format=json "${CHECKOUT_PATH}" ttl="30m") +SA_NAME=$(echo "$CRED_JSON" | jq -r .data.service_account_name) +SA_PW=$(echo "$CRED_JSON" | jq -r .data.password) +LEASE_ID=$(echo "$CRED_JSON" | jq -r .lease_id) + +# Validate checkout output +if [[ -z "$SA_NAME" || "$SA_NAME" == "null" ]]; then + fail "No service_account_name returned from check-out" +fi +if [[ -z "$SA_PW" || "$SA_PW" == "null" ]]; then + fail "No password returned from check-out" +fi +if [[ -z "$LEASE_ID" || "$LEASE_ID" == "null" ]]; then + fail "No lease_id returned from check-out" +fi + +# Attempt second check-out should fail +echo "==> Verifying no second check-out is allowed" +if vault write -format=json "${CHECKOUT_PATH}" 2>/dev/null; then + fail "Unexpectedly succeeded second check-out: account wasn't exclusive" +else + echo "[OK] Second check-out is correctly unavailable" +fi + +# Status after checkout +echo "==> Status after check-out" +vault read "${STATUS_PATH}" + +# Renew the lease +echo "==> Renewing lease ${LEASE_ID}" +RENEW_JSON=$(vault lease renew -format=json "${LEASE_ID}") +RENEW_TTL=$(echo "$RENEW_JSON" | jq -r .lease_duration) +if [[ -z "$RENEW_TTL" || "$RENEW_TTL" == "null" ]]; then + fail "Lease renew failed: no lease_duration 
returned" +fi +# Revoke the lease (auto check-in) +echo "==> Revoking lease ${LEASE_ID} to auto check-in" +vault lease revoke "${LEASE_ID}" +sleep 2 +# Verify account available after revoke +echo "==> Verifying account is available after lease revoke" +POST_REVOKE_AVAIL=$(vault read -format=json "${STATUS_PATH}" | jq -r ".data[\"$SA_NAME\"].available") +if [[ "$POST_REVOKE_AVAIL" != "true" ]]; then + fail "Account '$SA_NAME' should be available after lease revoke" +fi +# Attempt check-in on already available account (should succeed with empty check_ins) +echo "==> Checking in already available account (expect no check_ins)" +CI_JSON=$(vault write -format=json "${CHECKIN_PATH}" service_account_names="${SA_NAME}") +CI_COUNT=$(echo "$CI_JSON" | jq -r '.data.check_ins | length') +if [[ "$CI_COUNT" -ne 0 ]]; then + fail "Expected 0 check_ins when checking in an already available account, got $CI_COUNT" +fi + +# Check the account back in +echo "==> Checking in ${SA_NAME}" +vault write "${CHECKIN_PATH}" service_account_names="${SA_NAME}" + +# Status after check-in +echo "==> Status after check-in" +vault read "${STATUS_PATH}" + +# Force check-in of all accounts +echo "==> Forcing manage-level check-in of all accounts" +vault write "${MANAGE_CHECKIN_PATH}" service_account_names="${SERVICE_ACCOUNT_NAMES}" + +# After force check-in, verify both accounts available +echo "==> Checking status after manage-level check-in" +STATUS_AFTER_MANAGE=$(vault read -format=json "${STATUS_PATH}") +for acct in "${SA_LIST[@]}"; do + avail=$(echo "$STATUS_AFTER_MANAGE" | jq -r ".data[\"$acct\"].available") + if [[ "$avail" != "true" ]]; then + fail "Account '$acct' should be available after manage-level check-in" + fi +done + +# Test TTL expiry automatic check-in +echo "==> Testing TTL expiry automatic check-in" +TTL_TEST_JSON=$(vault write -format=json "${CHECKOUT_PATH}" ttl="10s") +TTL_NAME=$(echo "$TTL_TEST_JSON" | jq -r .data.service_account_name) +echo "Checked out ${TTL_NAME} with 10s 
TTL, waiting 12s" +sleep 12 +POST_TTL_AVAIL=$(vault read -format=json "${STATUS_PATH}" | jq -r ".data[\"$TTL_NAME\"].available") +if [[ "$POST_TTL_AVAIL" != "true" ]]; then + fail "Account '$TTL_NAME' should be available after TTL expiry" +fi + +# Delete library set +echo "==> Deleting library set" +vault delete "${LIB_PATH}" + +# Confirm deletion and absence from list +echo "==> Confirming deletion" +if vault read "${LIB_PATH}" 2>/dev/null; then + fail "Library set still exists after deletion!" +else + echo "[OK] Library set deleted successfully." +fi +LIST_AFTER_DEL=$(vault list "${PLUGIN_PATH}/library" 2>/dev/null || true) + +# Ensure the set no longer appears +if echo "$LIST_AFTER_DEL" | grep -x "${LIBRARY_SET_NAME}" >/dev/null; then + fail "Library set '${LIBRARY_SET_NAME}' still in list after deletion" +fi diff --git a/enos/modules/library_crud_api/variables.tf b/enos/modules/library_crud_api/variables.tf new file mode 100644 index 0000000..92ac875 --- /dev/null +++ b/enos/modules/library_crud_api/variables.tf @@ -0,0 +1,47 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +variable "ldap_host" { + type = string + description = "The LDAP server host" +} + +variable "ldap_port" { + type = string + description = "The LDAP server port" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix (e.g., dc=example,dc=com)" +} + +variable "library_set_name" { + type = string + description = "Name of the LDAP library set to create" +} + +variable "service_account_names" { + type = list(string) + description = "List of service account UIDs (under ou=users) for the library set" +} + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} diff --git a/enos/modules/root_rotation_manual/main.tf b/enos/modules/root_rotation_manual/main.tf new file mode 100644 index 0000000..bbac5b0 --- /dev/null +++ b/enos/modules/root_rotation_manual/main.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "rotation_period" { default = null } +variable "rotation_window" { default = null } + +resource "enos_remote_exec" "root_rotation_manual_test" { + scripts = [abspath("${path.module}/scripts/test-root-rotation-manual.sh")] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + } + + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} + diff --git a/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh b/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh new file mode 100755 index 0000000..5eea376 --- /dev/null +++ b/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -euo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Required env vars: PLUGIN_PATH +if [[ -z "${PLUGIN_PATH:-}" ]]; then + fail "PLUGIN_PATH env variable has not been set" +fi + +# Configure plugin for manual rotation +vault write -format=json "${PLUGIN_PATH}/config" \ + disable_automated_rotation=true \ + rotation_period=0 \ + rotation_schedule="" \ + rotation_window=0 >/dev/null + +# Read disable_automated_rotation from config +disable_automated_rotation=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.disable_automated_rotation') + +# Validate disable_automated_rotation +if [[ "$disable_automated_rotation" != "true" ]]; then + fail "[ERROR] Expected disable_automated_rotation=true, got $disable_automated_rotation" +fi + +# Read pre-rotation timestamp +before=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +# Trigger manual rotation +vault write -format=json -f "${PLUGIN_PATH}/rotate-root" >/dev/null + +# Read post-rotation 
timestamp +after=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +if [[ "$after" == "$before" ]]; then + fail "[ERROR] Manual rotation failed: timestamp did not change (before=$before, after=$after)" +fi + +echo "[OK] Manual rotation succeeded: timestamp updated (before=$before, after=$after)" diff --git a/enos/modules/root_rotation_manual/variables.tf b/enos/modules/root_rotation_manual/variables.tf new file mode 100644 index 0000000..0cf21c6 --- /dev/null +++ b/enos/modules/root_rotation_manual/variables.tf @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "Vault API address" +} + +variable "vault_root_token" { + type = string + description = "Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "SSH host/IP of Vault leader for remote exec" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path of the LDAP plugin in Vault" +} diff --git a/enos/modules/root_rotation_period/main.tf b/enos/modules/root_rotation_period/main.tf new file mode 100644 index 0000000..1dbfd65 --- /dev/null +++ b/enos/modules/root_rotation_period/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "rotation_window" { default = null } + +resource "enos_remote_exec" "root_rotation_period_test" { + scripts = [abspath("${path.module}/scripts/test-root-rotation-period.sh")] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + ROTATION_PERIOD = var.rotation_period + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} diff --git a/enos/modules/root_rotation_period/scripts/test-root-rotation-period.sh b/enos/modules/root_rotation_period/scripts/test-root-rotation-period.sh new file mode 100755 index 0000000..78c0005 --- /dev/null +++ b/enos/modules/root_rotation_period/scripts/test-root-rotation-period.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -euo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Required env vars: PLUGIN_PATH, ROTATION_PERIOD +if [[ -z "${PLUGIN_PATH:-}" ]]; then fail "PLUGIN_PATH not set"; fi +if [[ -z "${ROTATION_PERIOD:-}" ]]; then fail "ROTATION_PERIOD not set"; fi + +# Configure plugin for rotation period +vault write -format=json "${PLUGIN_PATH}/config" \ + disable_automated_rotation=false \ + rotation_period="${ROTATION_PERIOD}" \ + rotation_schedule="" \ + rotation_window=0 >/dev/null + +# Add cross-platform parse_epoch helper +parse_epoch() { + python3 -c " +import sys, datetime, re +ts = sys.argv[1] +if ts == 'null': + print(0) + sys.exit(0) +# Remove Z and handle nanoseconds +if ts.endswith('Z'): + ts = ts[:-1] +match = re.match(r'(.*\.\d{6})\d*(.*)', ts) +if match: + ts = match.group(1) + match.group(2) +dt = datetime.datetime.fromisoformat(ts) +print(int(dt.timestamp())) +" "$1" +} + +# Read rotation_period from config +rotation_period=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r 
'.data.rotation_period') + +# Validate rotation_period +if [[ "$rotation_period" != "$ROTATION_PERIOD" ]]; then + fail "[ERROR] Expected rotation_period=$ROTATION_PERIOD, got $rotation_period" +fi + +# Read timestamp before rotation +before=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +# Convert to epoch +before_epoch=$(parse_epoch "$before") + +# Wait for rotation_period + 1 seconds +echo "==> Sleeping for $((ROTATION_PERIOD + 1)) seconds for automated rotation" +sleep $((ROTATION_PERIOD + 1)) + +# Read timestamp after rotation +after=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +after_epoch=$(parse_epoch "$after") + +# Assert a rotation occurred +if [[ "$before" == "null" ]]; then + echo "[INFO] No previous rotation timestamp found (before=null), first rotation expected." +fi +if [[ "$after" == "null" ]]; then + fail "[ERROR] No rotation occurred, after=null" +fi + +# Compute difference +diff=$((after_epoch - before_epoch)) +if [[ "$diff" -lt "$ROTATION_PERIOD" ]]; then + fail "[ERROR] Automated rotation did not occur: delta $diff < $ROTATION_PERIOD" +fi + +#final check: + +echo "[OK] Automated rotation succeeded: delta $diff >= $ROTATION_PERIOD" diff --git a/enos/modules/root_rotation_period/variables.tf b/enos/modules/root_rotation_period/variables.tf new file mode 100644 index 0000000..3434a88 --- /dev/null +++ b/enos/modules/root_rotation_period/variables.tf @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "Vault API address" +} + +variable "vault_root_token" { + type = string + description = "Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "SSH host/IP of Vault leader for remote exec" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path of the LDAP plugin in Vault" +} + +variable "rotation_period" { + type = number + description = "Automated rotation period in seconds for the LDAP root credentials" +} diff --git a/enos/modules/root_rotation_schedule/main.tf b/enos/modules/root_rotation_schedule/main.tf new file mode 100644 index 0000000..c4e7ad4 --- /dev/null +++ b/enos/modules/root_rotation_schedule/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "rotation_period" { default = null } + +resource "enos_remote_exec" "root_rotation_schedule_test" { + scripts = [abspath("${path.module}/scripts/test-root-rotation-schedule.sh")] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + ROTATION_WINDOW = var.rotation_window + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} diff --git a/enos/modules/root_rotation_schedule/scripts/test-root-rotation-schedule.sh b/enos/modules/root_rotation_schedule/scripts/test-root-rotation-schedule.sh new file mode 100755 index 0000000..122b022 --- /dev/null +++ b/enos/modules/root_rotation_schedule/scripts/test-root-rotation-schedule.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -euo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Required env vars: PLUGIN_PATH, ROTATION_WINDOW +if [[ -z "${PLUGIN_PATH:-}" ]]; then fail "PLUGIN_PATH not set"; fi +if [[ -z "${ROTATION_WINDOW:-}" ]]; then fail "ROTATION_WINDOW not set"; fi + +# Compute cron schedule one minute from now +schedule=$(python3 -c "import datetime; t = datetime.datetime.now() + datetime.timedelta(minutes=1); print(f'{t.minute} {t.hour} * * *')") +echo "==> Using cron schedule: $schedule" + +# Configure plugin for schedule-based rotation +vault write -format=json "${PLUGIN_PATH}/config" \ + disable_automated_rotation=false \ + rotation_schedule="$schedule" \ + rotation_window="${ROTATION_WINDOW}" \ + rotation_period=0 >/dev/null + +# Read rotation_schedule from config +rotation_schedule=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.rotation_schedule') + +# Validate rotation_schedule +if [[ "$rotation_schedule" != "$schedule" ]]; then + fail "[ERROR] Expected rotation_schedule=$schedule, got $rotation_schedule" +fi + +# Read rotation_window from config +rotation_window=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.rotation_window') + +# Validate rotation_window +if [[ "$rotation_window" != "$ROTATION_WINDOW" ]]; then + fail "[ERROR] Expected rotation_window=$ROTATION_WINDOW, got $rotation_window" +fi + +# Cross-platform parse_epoch helper +parse_epoch() { + python3 -c " +import sys, datetime, re +ts = sys.argv[1] +if ts == 'null': + print(0) + sys.exit(0) +# Remove Z and handle nanoseconds +if ts.endswith('Z'): + ts = ts[:-1] +match = re.match(r'(.*\.\d{6})\d*(.*)', ts) +if match: + ts = match.group(1) + match.group(2) +dt = datetime.datetime.fromisoformat(ts) +print(int(dt.timestamp())) +" "$1" +} + +# Read timestamp before window expiration +before=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') +before_epoch=$(parse_epoch "$before") + +sleep 61 # Wait for the cron job to trigger + +# Read timestamp after window expiration +after=$(vault read -format=json 
"${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') +after_epoch=$(parse_epoch "$after") + +# Assert a rotation occurred +if [[ "$before" == "null" ]]; then + echo "[INFO] No previous rotation timestamp found (before=null), first rotation expected." +fi +if [[ "$after" == "null" ]]; then + fail "[ERROR] No rotation occurred, after=null" +fi + +diff=$((after_epoch - before_epoch)) +if [[ "$diff" -eq 0 ]]; then + fail "[ERROR] No rotation occurred at $after" +fi + +echo "[OK] Rotation occurred at $after" diff --git a/enos/modules/root_rotation_schedule/variables.tf b/enos/modules/root_rotation_schedule/variables.tf new file mode 100644 index 0000000..033f7de --- /dev/null +++ b/enos/modules/root_rotation_schedule/variables.tf @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "Vault API address" +} + +variable "vault_root_token" { + type = string + description = "Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "SSH host/IP of Vault leader for remote exec" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path of the LDAP plugin in Vault" +} + +variable "rotation_window" { + type = number + description = "Maximum time in seconds allowed to complete a scheduled rotation" + default = 3600 + + validation { + condition = var.rotation_window >= 3600 + error_message = "rotation_window must be at least 3600 seconds (1 hour)." + } +} \ No newline at end of file diff --git a/enos/modules/setup_plugin/main.tf b/enos/modules/setup_plugin/main.tf new file mode 100644 index 0000000..9c43509 --- /dev/null +++ b/enos/modules/setup_plugin/main.tf @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +# Step 1: Install the plugin bundle to the target hosts +resource "enos_bundle_install" "ldap" { + for_each = var.hosts + + destination = "/tmp/${var.plugin_name}" + release = var.release == null ? var.release : merge({ product = "vault-plugin-secrets-openldap" }, var.release) + artifactory = var.artifactory_release + path = var.local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Step 2: Clean up the plugin directory and copy the plugin binary there +resource "enos_remote_exec" "plugin_copy" { + depends_on = [enos_bundle_install.ldap] + for_each = var.hosts + scripts = [abspath("${path.module}/scripts/plugin-copy.sh")] + environment = { + PLUGIN_BINARY_SRC = "/tmp/${var.plugin_name}" + PLUGIN_DIR_VAULT = var.plugin_dir_vault + PLUGIN_NAME = var.plugin_name + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Step 3: Register the plugin +resource "enos_remote_exec" "plugin_register" { + depends_on = [enos_remote_exec.plugin_copy] + scripts = [abspath("${path.module}/scripts/plugin-register.sh")] + environment = { + PLUGIN_BINARY_SRC = "/tmp/${var.plugin_name}" + PLUGIN_DIR_VAULT = var.plugin_dir_vault + PLUGIN_NAME = var.plugin_name + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} + +# Step 4: Enable the plugin +resource "enos_remote_exec" "plugin_enable" { + depends_on = [enos_remote_exec.plugin_register] + scripts = [abspath("${path.module}/scripts/plugin-enable.sh")] + environment = { + PLUGIN_NAME = var.plugin_name + PLUGIN_PATH = var.plugin_mount_path + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} \ No 
newline at end of file diff --git a/enos/modules/setup_plugin/scripts/plugin-copy.sh b/enos/modules/setup_plugin/scripts/plugin-copy.sh new file mode 100644 index 0000000..ac9aeda --- /dev/null +++ b/enos/modules/setup_plugin/scripts/plugin-copy.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -e + +# Required ENV vars: +# PLUGIN_BINARY_SRC - Where the plugin binary is (built artifact) +# PLUGIN_DIR_VAULT - Vault's plugin directory +# PLUGIN_NAME - Name to register in Vault + +export VAULT_ADDR +export VAULT_TOKEN + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_BINARY_SRC" ]] && fail "PLUGIN_BINARY_SRC env variable has not been set" +[[ -z "$PLUGIN_DIR_VAULT" ]] && fail "PLUGIN_DIR_VAULT env variable has not been set" +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" + +echo "[copy] Copying plugin binary: $PLUGIN_NAME" + +# Determine plugin binary source path (handle directories) +if [[ -d "$PLUGIN_BINARY_SRC" ]]; then + BINARY_PATH="$PLUGIN_BINARY_SRC/$PLUGIN_NAME" +else + BINARY_PATH="$PLUGIN_BINARY_SRC" +fi + +# Ensure the Vault plugin directory exists +sudo mkdir -p "${PLUGIN_DIR_VAULT}" + +# Clean up any previous plugin binary +sudo rm -f "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" || true + +# Copy the binary to Vault's plugin directory +sudo cp "${BINARY_PATH}" "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" + +# Set permissions to ensure Vault can execute the plugin binary +sudo chmod 755 "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" \ No newline at end of file diff --git a/enos/modules/setup_plugin/scripts/plugin-enable.sh b/enos/modules/setup_plugin/scripts/plugin-enable.sh new file mode 100755 index 0000000..1de88dc --- /dev/null +++ b/enos/modules/setup_plugin/scripts/plugin-enable.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +# Required ENV vars: +# PLUGIN_NAME - Name registered in Vault +# PLUGIN_PATH - Mount path for secrets engine (e.g., 'local-secrets-ldap') + +export VAULT_ADDR +export VAULT_TOKEN + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" + +echo "[enable] Enabling plugin $PLUGIN_NAME at path $PLUGIN_PATH" + +# Disable previous mount if exists +vault secrets disable "${PLUGIN_PATH}" || true + +# Enable plugin at specified path +vault secrets enable -path="${PLUGIN_PATH}" "${PLUGIN_NAME}" + +echo "[enable] Plugin $PLUGIN_NAME enabled at $PLUGIN_PATH." \ No newline at end of file diff --git a/enos/modules/setup_plugin/scripts/plugin-register.sh b/enos/modules/setup_plugin/scripts/plugin-register.sh new file mode 100755 index 0000000..fed6bae --- /dev/null +++ b/enos/modules/setup_plugin/scripts/plugin-register.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +# Required ENV vars: +# PLUGIN_BINARY_SRC - Where the plugin binary is (built artifact) +# PLUGIN_DIR_VAULT - Vault's plugin directory +# PLUGIN_NAME - Name to register in Vault + +export VAULT_ADDR +export VAULT_TOKEN + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_BINARY_SRC" ]] && fail "PLUGIN_BINARY_SRC env variable has not been set" +[[ -z "$PLUGIN_DIR_VAULT" ]] && fail "PLUGIN_DIR_VAULT env variable has not been set" +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" + +echo "[register] Registering plugin: $PLUGIN_NAME" + +# Calculate shasum +SHASUM="$(shasum -a 256 "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" | awk '{print $1}')" +if [[ -z "$SHASUM" ]]; then + echo "[register] error: shasum not set" + exit 1 +fi +echo "[register] Plugin SHA256: $SHASUM" + +# Deregister any previous registration of this plugin +vault plugin deregister secret "${PLUGIN_NAME}" || true + +# Register plugin with Vault +vault plugin register \ + -sha256="${SHASUM}" \ + secret "${PLUGIN_NAME}" + +echo "[register] Plugin $PLUGIN_NAME registered successfully." \ No newline at end of file diff --git a/enos/modules/setup_plugin/variables.tf b/enos/modules/setup_plugin/variables.tf new file mode 100644 index 0000000..1c89f24 --- /dev/null +++ b/enos/modules/setup_plugin/variables.tf @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_name" { + type = string + description = "Name of the plugin" +} + +variable "plugin_source_type" { + type = string + description = "Plugin Source" + default = "local_build" + validation { + condition = contains(["local_build", "registry", "local_path"], var.plugin_source_type) + error_message = "plugin_source_type must be one of: 'local_build', 'registry', 'local_path'." + } +} + +variable "plugin_dir_vault" { + type = string + description = "Plugin directory on Vault side" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +variable "artifactory_release" { + type = object({ + token = string + url = string + sha256 = string + username = string + }) + description = "The Artifactory release information to install Vault artifacts from Artifactory" + default = null +} + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "LDAP release version and edition to install from releases.hashicorp.com" + default = null +} + +variable "local_artifact_path" { + type = string + description = "The path to a locally built vault artifact to install. 
It can be a zip archive, RPM, or Debian package" + default = null +} \ No newline at end of file diff --git a/enos/modules/static_role_crud_api/main.tf b/enos/modules/static_role_crud_api/main.tf new file mode 100644 index 0000000..9bf8c79 --- /dev/null +++ b/enos/modules/static_role_crud_api/main.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + admin_dn = "cn=admin,${var.ldap_base_dn}" + users_dn = "ou=users,${var.ldap_base_dn}" + user_dn = "uid=${var.ldap_username},${local.users_dn}" +} + +resource "enos_remote_exec" "static_role_crud_api_test" { + scripts = ["${path.module}/scripts/static-role.sh"] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + LDAP_HOST = var.ldap_host + LDAP_PORT = var.ldap_port + LDAP_DN = local.user_dn + LDAP_USERNAME = var.ldap_username + LDAP_OLD_PASSWORD = var.ldap_user_old_password + ROLE_NAME = var.ldap_user_role_name + LDAP_BIND_DN = local.admin_dn + LDAP_BIND_PASS = var.ldap_bind_pass + } + + transport = { + ssh = { + host = var.vault_leader_ip + } + } + +} \ No newline at end of file diff --git a/enos/modules/static_role_crud_api/scripts/static-role.sh b/enos/modules/static_role_crud_api/scripts/static-role.sh new file mode 100644 index 0000000..9c92d7e --- /dev/null +++ b/enos/modules/static_role_crud_api/scripts/static-role.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -e + +# Test Vault LDAP Static Role CRUD and credential lifecycle using provided LDIFs. +# Assumptions: +# - Vault CLI is authenticated and VAULT_ADDR and VAULT_TOKEN are set. 
+# - Required ENV vars: +# PLUGIN_PATH - Path to the mounted plugin secrets engine (e.g., ldap-secrets/) +# LDAP_HOST - LDAP server hostname or IP (e.g., 127.0.0.1) +# LDAP_PORT - LDAP server port (e.g., 389) +# LDAP_DN - User DN (e.g., uid=mary.smith,ou=users,dc=example,dc=com) +# LDAP_USERNAME - LDAP username (e.g., mary.smith) +# LDAP_OLD_PASSWORD - The original LDAP password for testing (before Vault rotation) +# ROLE_NAME - Name of the static role to create (e.g., mary) + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" +[[ -z "$LDAP_HOST" ]] && fail "LDAP_HOST env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDAP_DN" ]] && fail "LDAP_DN env variable has not been set" +[[ -z "$LDAP_USERNAME" ]] && fail "LDAP_USERNAME env variable has not been set" +[[ -z "$LDAP_OLD_PASSWORD" ]] && fail "LDAP_OLD_PASSWORD env variable has not been set" +[[ -z "$ROLE_NAME" ]] && fail "ROLE_NAME env variable has not been set" +[[ -z "$LDAP_BIND_DN" ]] && fail "LDAP_BIND_DN env variable has not been set" +[[ -z "$LDAP_BIND_PASS" ]] && fail "LDAP_BIND_PASS env variable has not been set" + +export VAULT_ADDR +export VAULT_TOKEN + +ROLE_PATH="${PLUGIN_PATH}/static-role/${ROLE_NAME}" +CRED_PATH="${PLUGIN_PATH}/static-cred/${ROLE_NAME}" + +echo "==> LDAP_HOST: ${LDAP_HOST}" +echo "==> LDAP_PORT: ${LDAP_PORT}" + +echo "==> Creating static role ${ROLE_NAME}" +vault write "${ROLE_PATH}" \ + dn="${LDAP_DN}" \ + username="${LDAP_USERNAME}" \ + rotation_period="5m" + +echo "==> Reading static role" +vault read "${ROLE_PATH}" + +echo "==> Reading credentials" +vault read "${CRED_PATH}" + +echo "==> Listing all static roles" +vault list "${PLUGIN_PATH}/static-role" + +echo "==> LDAP check: old password should fail after rotation" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${LDAP_OLD_PASSWORD}" -D "${LDAP_DN}"; then + echo "[ERROR] Old password still 
works! Rotation failed." + exit 1 +else + echo "[OK] Old password rejected as expected." +fi + +echo "==> LDAP check: new password should succeed" +NEW_PASSWORD=$(vault read -field=password "${CRED_PATH}") +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${NEW_PASSWORD}" -D "${LDAP_DN}"; then + echo "[OK] New password accepted as expected." +else + echo "[ERROR] New password did not work!" + exit 1 +fi + +echo "==> Forcing manual rotation for static role" +vault write -force "${PLUGIN_PATH}/rotate-role/${ROLE_NAME}" +echo "==> Reading credentials after manual rotation" +ROTATED_PASSWORD=$(vault read -field=password "${CRED_PATH}") +echo "==> LDAP check: old generated password should be rejected" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${NEW_PASSWORD}" -D "${LDAP_DN}"; then + echo "[ERROR] Previously generated password still works after manual rotation!" + exit 1 +else + echo "[OK] Old generated password rejected as expected." +fi +echo "==> LDAP check: new rotated password should succeed" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${ROTATED_PASSWORD}" -D "${LDAP_DN}"; then + echo "[OK] Rotated password accepted as expected." +else + echo "[ERROR] Rotated password did not work!" + exit 1 +fi + +echo "==> Updating static role (change rotation_period)" +vault write "${ROLE_PATH}" \ + dn="${LDAP_DN}" \ + username="${LDAP_USERNAME}" \ + rotation_period="10m" + +echo "==> Reading updated static role" +vault read "${ROLE_PATH}" + +echo "==> Deleting static role" +vault delete "${ROLE_PATH}" + +echo "==> Confirming deletion" +if vault read "${ROLE_PATH}"; then + echo "[ERROR] Static role still exists after deletion!" + exit 1 +else + echo "[OK] Static role deleted successfully." 
+fi \ No newline at end of file diff --git a/enos/modules/static_role_crud_api/variables.tf b/enos/modules/static_role_crud_api/variables.tf new file mode 100644 index 0000000..862ae7a --- /dev/null +++ b/enos/modules/static_role_crud_api/variables.tf @@ -0,0 +1,55 @@ +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + + +variable "ldap_host" { + type = string + description = "The LDAP server host" +} + +variable "ldap_port" { + type = string + description = "The LDAP server port" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix" +} + +variable "ldap_bind_pass" { + type = string + description = "LDAP bind password" +} + +variable "ldap_username" { + description = "The username of the LDAP user to create" + type = string +} + +variable "ldap_user_old_password" { + description = "The old password of the LDAP user to create" + type = string +} + +variable "ldap_user_role_name" { + description = "The name of the LDAP user role to create" + type = string +} + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} diff --git a/enos/modules/vault_wait_for_cluster_sealed/main.tf b/enos/modules/vault_wait_for_cluster_sealed/main.tf new file mode 100644 index 0000000..17f9e5b --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_sealed/main.tf @@ -0,0 +1,62 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + description = "The Vault cluster instances to verify sealed" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "retry_interval" { + description = "Seconds to wait between retries" + type = number + default = 2 +} + +variable "timeout" { + description = "Max seconds to wait before timing out" + type = number + default = 60 +} + +variable "vault_addr" { + description = "Vault API address" + type = string +} + +variable "vault_install_dir" { + description = "Directory where the Vault binary is installed" + type = string +} + +resource "enos_remote_exec" "verify_node_sealed" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/verify-vault-node-sealed.sh")] + + environment = { + HOST_IPV4 = each.value.public_ip + HOST_IPV6 = each.value.ipv6 + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} \ No newline at end of file diff --git a/enos/modules/vault_wait_for_cluster_sealed/scripts/verify-vault-node-sealed.sh b/enos/modules/vault_wait_for_cluster_sealed/scripts/verify-vault-node-sealed.sh new file mode 100644 index 0000000..8a600ec --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_sealed/scripts/verify-vault-node-sealed.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getStatus() { + $binpath status -format json +} + +isSealed() { + local status ret + status=$(getStatus) + ret=$? + + if [[ $ret -eq 1 ]]; then + echo "failed to get vault status" 1>&2 + return 1 + fi + + if [[ -z "$status" ]]; then + echo "vault status output empty" 1>&2 + return 1 + fi + + if [[ $ret -eq 2 ]]; then + echo "vault is sealed" 1>&2 + return 2 + fi + + echo "vault is unsealed" + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + echo "waiting for vault to be sealed..." + + if isSealed; sealed_rc=$?; [ $sealed_rc -eq 2 ]; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +if [ -n "$HOST_IPV6" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV6} to be sealed" +fi +if [ -n "$HOST_IPV4" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV4} to be sealed" +fi +fail "timed out waiting for Vault cluster to be sealed" diff --git a/enos/template_enos.vars.hcl b/enos/template_enos.vars.hcl new file mode 100644 index 0000000..8d639bd --- /dev/null +++ b/enos/template_enos.vars.hcl @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ========================= IMPORTANT ================================= +// COPY this file to an `enos*.vars.hcl` and fill in the required values. 
+// ===================================================================== + +// artifactory_token is the token to use when authenticating to artifactory. +// artifactory_token = "yourtoken" + +// artifactory_host is the artifactory host to search for vault artifacts. +// artifactory_host = "https://artifactory.hashicorp.engineering/artifactory" + +// artifactory_repo is the artifactory repo to search for vault artifacts. +// artifactory_repo = "hashicorp-crt-stable-local*" + +// aws_region is the AWS region where we'll create infrastructure +// for the smoke scenario +// aws_region = "us-east-1" + +// aws_ssh_keypair_name is the AWS keypair to use for SSH +// aws_ssh_keypair_name = "enos-ci-ssh-key" + +// aws_ssh_private_key_path is the path to the AWS keypair private key +// aws_ssh_private_key_path = "./support/private_key.pem" + +// backend_log_level is the server log level for the backend. Supported values include 'trace', +// 'debug', 'info', 'warn', 'error'" +// backend_log_level = "trace" + +// backend_instance_type is the instance type to use for the Vault backend. Must support arm64 +// backend_instance_type = "t4g.small" + +// project_name is the description of the project. It will often be used to tag infrastructure +// resources. +// project_name = "vault-openldap-se-enos-integration" + +// distro_version_amzn is the version of Amazon Linux 2 to use for "distro:amzn" variants +// distro_version_amzn = "2" + +// distro_version_ubuntu is the version of ubuntu to use for "distro:ubuntu" variants +// distro_version_ubuntu = "22.04" // or "24.04" + +// ldap_artifact_path is the path to the LDAP plugin artifact (zip file) to be installed. +// ldap_artifact_path = "~/go/vault-plugins/vault-plugin-secrets-openldap.zip" + +// ldap_artifactory_repo is the Artifactory repository where the LDAP plugin artifact is stored. +// ldap_artifactory_repo = "hashicorp-vault-ecosystem-staging-local" + +// ldap_base_dn is the base distinguished name for the LDAP directory. 
+// ldap_base_dn = "dc=example,dc=com" + +// ldap_bind_pass is the password for the LDAP bind distinguished name. +// ldap_bind_pass = "adminpassword" + +// ldap_dynamic_role_ldif_templates_path is the path to the LDIF templates for dynamic roles. +// ldap_dynamic_role_ldif_templates_path = "/tmp" + +// ldap_dynamic_user_role_name is the name of the dynamic role for LDAP users. +// ldap_dynamic_user_role_name = "adam" + +// ldap_library_set_name is the name of the library set to use for the LDAP plugin. +// ldap_library_set_name = "dev-team" + +// ldap_plugin_version is the version of the LDAP plugin being used. +// ldap_plugin_version = "0.15.0" + +// ldap_revision is the git SHA of the LDAP plugin artifact being tested. +// ldap_revision = "2ee1253cb5ff67196d0e4747e8aedd1c4903625f" + +// ldap_rotation_period is the period after which the LDAP root creds will be rotated. +// ldap_rotation_period = "10" // (in seconds) + +// ldap_rotation_window is the time window during which the LDAP root creds can be rotated. +// ldap_rotation_window = "3600" // (in seconds) + +// ldap_schema specifies the LDAP schema to use (e.g., openldap). +// ldap_schema = "openldap" + +// ldap_service_account_names is a list of service account names to be used with the LDAP plugin. +// ldap_service_account_names = ["staticuser", "bob.johnson", "mary.smith"] + +// ldap_tag is the tag or version identifier for the LDAP plugin build. +// ldap_tag = "1.3.0" + +// ldap_username is the username for the LDAP user to authenticate. +// ldap_username = "mary" + +// ldap_user_old_password is the old password for the LDAP user. +// ldap_user_old_password = "defaultpassword" + +// ldap_user_role_name is the name of the role on the Vault side. +// ldap_user_role_name = "mary" + +// makefile_dir is the directory containing the Makefile for building the plugin. 
+// makefile_dir = "/Users//hashicorp/plugins/vault-plugin-secrets-openldap/" + +// plugin_dest_dir is the local directory where the plugin artifact will be stored. +// plugin_dest_dir = "/Users//go/vault-plugins" + +// plugin_dir_vault is the directory on the Vault server where plugins are installed. +// plugin_dir_vault = "/etc/vault/plugins" + +// plugin_mount_path is the mount path in Vault where the plugin will be enabled. +// plugin_mount_path = "local-secrets-ldap" + +// plugin_name is the name of the Vault plugin to be used for LDAP secrets. +// plugin_name = "vault-plugin-secrets-openldap" + +// tags are a map of tags that will be applied to infrastructure resources that +// support tagging. +// tags = { "Project Name" : "Vault", "Something Cool" : "Value" } + +// terraform_plugin_cache_dir is the directory to cache Terraform modules and providers. +// It must exist. +// terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir" + +// ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will +// be appended to the ember test command as '-f=\"\"'. +// ui_test_filter = "sometest" + +// ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a +// cluster will be created but no tests will be run. +// ui_run_tests = true + +// vault_artifact_path is the path to CRT generated or local vault.zip bundle. When +// using the "builder:local" variant a bundle will be built from the current branch. +// In CI it will use the output of the build workflow. +// vault_artifact_path = "./dist/vault.zip" + +// vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory. +// It should be 'package' for .deb or .rpm package and 'bundle' for .zip bundles. +// vault_artifact_type = "bundle" + +// vault_build_date is the build date for Vault artifact. 
Some validations will require the binary build +// date to match. +// vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example + +// vault_enable_audit_devices sets whether or not to enable every audit device. If true +// a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog +// audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090 +// will be enabled. The netcat program is run in listening mode to provide an endpoint +// that the socket audit device can connect to. +// vault_enable_audit_devices = true + +// vault_install_dir is the directory where the vault binary will be installed on +// the remote machines. +// vault_install_dir = "/opt/vault/bin" + +// vault_local_binary_path is the path of the local binary that we're upgrading to. +// vault_local_binary_path = "./support/vault" + +// vault_instance_type is the instance type to use for the Vault backend +// vault_instance_type = "t3.small" + +// vault_instance_count is how many instances to create for the Vault cluster. +// vault_instance_count = 3 + +// vault_license_path is the path to a valid Vault enterprise edition license. +// This is only required for non-ce editions. +// vault_license_path = "./support/vault.hclic" + +// vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. +// vault_local_build_tags = ["ui", "ent"] + +// vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are +// trace, debug, info, warn, and err. +// vault_log_level = "trace" + +// vault_product_version is the version of Vault we are testing. Some validations will expect the vault +// binary and cluster to report this version. +// vault_product_version = "1.15.0" + +// vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault +// binary and cluster to report this revision. 
+// vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" \ No newline at end of file